xref: /openbmc/qemu/hw/cxl/cxl-mailbox-utils.c (revision ba31a6fca7b03fce47228423b4dc951e04efd96c)
1 /*
2  * CXL Utility library for mailbox interface
3  *
4  * Copyright(C) 2020 Intel Corporation.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include <math.h>
12 
13 #include "hw/pci/msi.h"
14 #include "hw/pci/msix.h"
15 #include "hw/cxl/cxl.h"
16 #include "hw/cxl/cxl_events.h"
17 #include "hw/cxl/cxl_mailbox.h"
18 #include "hw/pci/pci.h"
19 #include "hw/pci-bridge/cxl_upstream_port.h"
20 #include "qemu/cutils.h"
21 #include "qemu/host-utils.h"
22 #include "qemu/log.h"
23 #include "qemu/units.h"
24 #include "qemu/uuid.h"
25 #include "system/hostmem.h"
26 #include "qemu/range.h"
27 #include "qapi/qapi-types-cxl.h"
28 
29 #define CXL_CAPACITY_MULTIPLIER   (256 * MiB)
30 #define CXL_DC_EVENT_LOG_SIZE 8
31 #define CXL_NUM_TAGS_SUPPORTED 0
32 #define CXL_ALERTS_LIFE_USED_WARN_THRESH (1 << 0)
33 #define CXL_ALERTS_OVER_TEMP_WARN_THRESH (1 << 1)
34 #define CXL_ALERTS_UNDER_TEMP_WARN_THRESH (1 << 2)
35 #define CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH (1 << 3)
36 #define CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH (1 << 4)
37 
38 /*
39  * How to add a new command, by example: the command set FOO, with command BAR.
40  *  1. Add the command set and command to the enum.
41  *     FOO    = 0x7f,
42  *          #define BAR 0
43  *  2. Implement the handler, using the common handler signature:
44  *    static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
45  *                                  uint8_t *payload_in, size_t len_in,
46  *                                  uint8_t *payload_out, size_t *len_out,
47  *                                  CXLCCI *cci)
48  *  3. Add the command to the cxl_cmd_set[][]
49  *    [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
50  *
51  *
52  *  Writing the handler:
53  *    The handler is passed the &struct cxl_cmd, the &CXLCCI, and the input and
54  *    output payload buffers together with their lengths. The handler is
55  *    responsible for consuming the payload from payload_in and operating upon it
56  *    as necessary. It must then fill the output data into payload_out, set
57  *    *len_out to the response length, and return a valid return code.
58  *
59  *  XXX: The handler need not worry about endianness. The payload is read out of
60  *  a register interface that already deals with it.
61  */
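/*
 * A minimal sketch of such a handler, following the steps above (FOO, BAR and
 * cmd_foo_bar are illustrative placeholders, not a real command set). It
 * consumes one byte of input, fills one byte of output, sets the response
 * length, and returns a valid return code:
 *
 *    static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                  uint8_t *payload_in, size_t len_in,
 *                                  uint8_t *payload_out, size_t *len_out,
 *                                  CXLCCI *cci)
 *    {
 *        if (len_in < 1) {
 *            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
 *        }
 *        payload_out[0] = payload_in[0] + 1;
 *        *len_out = 1;
 *        return CXL_MBOX_SUCCESS;
 *    }
 */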
62 
63 enum {
64     INFOSTAT    = 0x00,
65         #define IS_IDENTIFY   0x1
66         #define BACKGROUND_OPERATION_STATUS    0x2
67         #define GET_RESPONSE_MSG_LIMIT         0x3
68         #define SET_RESPONSE_MSG_LIMIT         0x4
69         #define BACKGROUND_OPERATION_ABORT     0x5
70     EVENTS      = 0x01,
71         #define GET_RECORDS   0x0
72         #define CLEAR_RECORDS   0x1
73         #define GET_INTERRUPT_POLICY   0x2
74         #define SET_INTERRUPT_POLICY   0x3
75     FIRMWARE_UPDATE = 0x02,
76         #define GET_INFO      0x0
77         #define TRANSFER      0x1
78         #define ACTIVATE      0x2
79     TIMESTAMP   = 0x03,
80         #define GET           0x0
81         #define SET           0x1
82     LOGS        = 0x04,
83         #define GET_SUPPORTED 0x0
84         #define GET_LOG       0x1
85     FEATURES    = 0x05,
86         #define GET_SUPPORTED 0x0
87         #define GET_FEATURE   0x1
88         #define SET_FEATURE   0x2
89     IDENTIFY    = 0x40,
90         #define MEMORY_DEVICE 0x0
91     CCLS        = 0x41,
92         #define GET_PARTITION_INFO     0x0
93         #define GET_LSA       0x2
94         #define SET_LSA       0x3
95     HEALTH_INFO_ALERTS = 0x42,
96         #define GET_ALERT_CONFIG 0x1
97         #define SET_ALERT_CONFIG 0x2
98     SANITIZE    = 0x44,
99         #define OVERWRITE     0x0
100         #define SECURE_ERASE  0x1
101         #define MEDIA_OPERATIONS 0x2
102     PERSISTENT_MEM = 0x45,
103         #define GET_SECURITY_STATE     0x0
104     MEDIA_AND_POISON = 0x43,
105         #define GET_POISON_LIST        0x0
106         #define INJECT_POISON          0x1
107         #define CLEAR_POISON           0x2
108         #define GET_SCAN_MEDIA_CAPABILITIES 0x3
109         #define SCAN_MEDIA             0x4
110         #define GET_SCAN_MEDIA_RESULTS 0x5
111     DCD_CONFIG  = 0x48,
112         #define GET_DC_CONFIG          0x0
113         #define GET_DYN_CAP_EXT_LIST   0x1
114         #define ADD_DYN_CAP_RSP        0x2
115         #define RELEASE_DYN_CAP        0x3
116     PHYSICAL_SWITCH = 0x51,
117         #define IDENTIFY_SWITCH_DEVICE      0x0
118         #define GET_PHYSICAL_PORT_STATE     0x1
119     TUNNEL = 0x53,
120         #define MANAGEMENT_COMMAND     0x0
121     FMAPI_DCD_MGMT = 0x56,
122         #define GET_DCD_INFO    0x0
123         #define GET_HOST_DC_REGION_CONFIG   0x1
124         #define SET_DC_REGION_CONFIG        0x2
125         #define GET_DC_REGION_EXTENT_LIST   0x3
126         #define INITIATE_DC_ADD             0x4
127         #define INITIATE_DC_RELEASE         0x5
128 };
129 
130 /* CCI Message Format CXL r3.1 Figure 7-19 */
131 typedef struct CXLCCIMessage {
132     uint8_t category;
133 #define CXL_CCI_CAT_REQ 0
134 #define CXL_CCI_CAT_RSP 1
135     uint8_t tag;
136     uint8_t resv1;
137     uint8_t command;
138     uint8_t command_set;
139     uint8_t pl_length[3];
140     uint16_t rc;
141     uint16_t vendor_specific;
142     uint8_t payload[];
143 } QEMU_PACKED CXLCCIMessage;
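/*
 * pl_length is a 24-bit little-endian count of message payload bytes. For
 * example (illustrative value), a payload of 0x012345 bytes is encoded as
 * pl_length[0] = 0x45, pl_length[1] = 0x23, pl_length[2] = 0x01, matching how
 * the tunnel handler below assembles it and stores it with st24_le_p().
 */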
144 
145 /* This command is only defined for an MLD FM-owned LD or an MHD */
146 static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
147                                             uint8_t *payload_in,
148                                             size_t len_in,
149                                             uint8_t *payload_out,
150                                             size_t *len_out,
151                                             CXLCCI *cci)
152 {
153     PCIDevice *tunnel_target;
154     CXLCCI *target_cci;
155     struct {
156         uint8_t port_or_ld_id;
157         uint8_t target_type;
158         uint16_t size;
159         CXLCCIMessage ccimessage;
160     } QEMU_PACKED *in;
161     struct {
162         uint16_t resp_len;
163         uint8_t resv[2];
164         CXLCCIMessage ccimessage;
165     } QEMU_PACKED *out;
166     size_t pl_length, length_out;
167     bool bg_started;
168     int rc;
169 
170     if (cmd->in < sizeof(*in)) {
171         return CXL_MBOX_INVALID_INPUT;
172     }
173     in = (void *)payload_in;
174     out = (void *)payload_out;
175 
176     if (len_in < sizeof(*in)) {
177         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
178     }
179     /* Enough room for minimum sized message - no payload */
180     if (in->size < sizeof(in->ccimessage)) {
181         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
182     }
183     /* Length of input payload should be in->size + a wrapping tunnel header */
184     if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
185         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
186     }
187     if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
188         return CXL_MBOX_INVALID_INPUT;
189     }
190 
191     if (in->target_type != 0) {
192         qemu_log_mask(LOG_UNIMP,
193                       "Tunneled Command sent to non-existent FM-LD");
194         return CXL_MBOX_INVALID_INPUT;
195     }
196 
197     /*
198      * The target of a tunnel unfortunately depends on the type of CCI reading
199      * the message.
200      * If in a switch, then it's the port number.
201      * If in an MLD, it is the LD number.
202      * If in an MHD, the target type indicates where we are going.
203      */
204     if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
205         CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
206         if (in->port_or_ld_id != 0) {
207             /* Only pretending to have one for now! */
208             return CXL_MBOX_INVALID_INPUT;
209         }
210         target_cci = &ct3d->ld0_cci;
211     } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
212         CXLUpstreamPort *usp = CXL_USP(cci->d);
213 
214         tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
215                                              in->port_or_ld_id);
216         if (!tunnel_target) {
217             return CXL_MBOX_INVALID_INPUT;
218         }
219         tunnel_target =
220             pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
221         if (!tunnel_target) {
222             return CXL_MBOX_INVALID_INPUT;
223         }
224         if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
225             CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);
226             /* Tunneled VDMs always land on FM Owned LD */
227             target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
228         } else {
229             return CXL_MBOX_INVALID_INPUT;
230         }
231     } else {
232         return CXL_MBOX_INVALID_INPUT;
233     }
234 
235     pl_length = in->ccimessage.pl_length[2] << 16 |
236         in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
237     rc = cxl_process_cci_message(target_cci,
238                                  in->ccimessage.command_set,
239                                  in->ccimessage.command,
240                                  pl_length, in->ccimessage.payload,
241                                  &length_out, out->ccimessage.payload,
242                                  &bg_started);
243     /* Payload should be in place. The rest of the CCI header now needs filling */
244     out->resp_len = length_out + sizeof(CXLCCIMessage);
245     st24_le_p(out->ccimessage.pl_length, length_out);
246     out->ccimessage.rc = rc;
247     out->ccimessage.category = CXL_CCI_CAT_RSP;
248     out->ccimessage.command = in->ccimessage.command;
249     out->ccimessage.command_set = in->ccimessage.command_set;
250     out->ccimessage.tag = in->ccimessage.tag;
251     *len_out = length_out + sizeof(*out);
252 
253     return CXL_MBOX_SUCCESS;
254 }
255 
256 static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
257                                          uint8_t *payload_in, size_t len_in,
258                                          uint8_t *payload_out, size_t *len_out,
259                                          CXLCCI *cci)
260 {
261     CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
262     CXLGetEventPayload *pl;
263     uint8_t log_type;
264     int max_recs;
265 
266     if (cmd->in < sizeof(log_type)) {
267         return CXL_MBOX_INVALID_INPUT;
268     }
269 
270     log_type = payload_in[0];
271 
272     pl = (CXLGetEventPayload *)payload_out;
273 
274     max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
275                 CXL_EVENT_RECORD_SIZE;
276     if (max_recs > 0xFFFF) {
277         max_recs = 0xFFFF;
278     }
279 
280     return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
281 }
282 
283 static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
284                                            uint8_t *payload_in,
285                                            size_t len_in,
286                                            uint8_t *payload_out,
287                                            size_t *len_out,
288                                            CXLCCI *cci)
289 {
290     CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
291     CXLClearEventPayload *pl;
292 
293     pl = (CXLClearEventPayload *)payload_in;
294 
295     if (len_in < sizeof(*pl) ||
296         len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) {
297         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
298     }
299 
300     *len_out = 0;
301     return cxl_event_clear_records(cxlds, pl);
302 }
303 
304 static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
305                                                   uint8_t *payload_in,
306                                                   size_t len_in,
307                                                   uint8_t *payload_out,
308                                                   size_t *len_out,
309                                                   CXLCCI *cci)
310 {
311     CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
312     CXLEventInterruptPolicy *policy;
313     CXLEventLog *log;
314 
315     policy = (CXLEventInterruptPolicy *)payload_out;
316 
317     log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
318     if (log->irq_enabled) {
319         policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
320     }
321 
322     log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
323     if (log->irq_enabled) {
324         policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
325     }
326 
327     log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
328     if (log->irq_enabled) {
329         policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
330     }
331 
332     log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
333     if (log->irq_enabled) {
334         policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
335     }
336 
337     log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
338     if (log->irq_enabled) {
339         /* Dynamic Capacity borrows the same vector as info */
340         policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
341     }
342 
343     *len_out = sizeof(*policy);
344     return CXL_MBOX_SUCCESS;
345 }
346 
347 static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
348                                                   uint8_t *payload_in,
349                                                   size_t len_in,
350                                                   uint8_t *payload_out,
351                                                   size_t *len_out,
352                                                   CXLCCI *cci)
353 {
354     CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
355     CXLEventInterruptPolicy *policy;
356     CXLEventLog *log;
357 
358     if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
359         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
360     }
361 
362     policy = (CXLEventInterruptPolicy *)payload_in;
363 
364     log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
365     log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
366                         CXL_INT_MSI_MSIX;
367 
368     log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
369     log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
370                         CXL_INT_MSI_MSIX;
371 
372     log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
373     log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
374                         CXL_INT_MSI_MSIX;
375 
376     log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
377     log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
378                         CXL_INT_MSI_MSIX;
379 
380     /* DCD is optional */
381     if (len_in < sizeof(*policy)) {
382         return CXL_MBOX_SUCCESS;
383     }
384 
385     log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
386     log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
387                         CXL_INT_MSI_MSIX;
388 
389     *len_out = 0;
390     return CXL_MBOX_SUCCESS;
391 }
392 
393 /* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
394 static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
395                                         uint8_t *payload_in,
396                                         size_t len_in,
397                                         uint8_t *payload_out,
398                                         size_t *len_out,
399                                         CXLCCI *cci)
400 {
401     PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
402     struct {
403         uint16_t pcie_vid;
404         uint16_t pcie_did;
405         uint16_t pcie_subsys_vid;
406         uint16_t pcie_subsys_id;
407         uint64_t sn;
408         uint8_t max_message_size;
409         uint8_t component_type;
410     } QEMU_PACKED *is_identify;
411     QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);
412 
413     is_identify = (void *)payload_out;
414     is_identify->pcie_vid = class->vendor_id;
415     is_identify->pcie_did = class->device_id;
416     if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
417         is_identify->sn = CXL_USP(cci->d)->sn;
418         /* Subsystem info not defined for a USP */
419         is_identify->pcie_subsys_vid = 0;
420         is_identify->pcie_subsys_id = 0;
421         is_identify->component_type = 0x0; /* Switch */
422     } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
423         PCIDevice *pci_dev = PCI_DEVICE(cci->d);
424 
425         is_identify->sn = CXL_TYPE3(cci->d)->sn;
426         /*
427          * We can't always use class->subsystem_vendor_id as
428          * it is not set if the defaults are used.
429          */
430         is_identify->pcie_subsys_vid =
431             pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
432         is_identify->pcie_subsys_id =
433             pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
434         is_identify->component_type = 0x3; /* Type 3 */
435     }
436 
437     is_identify->max_message_size = (uint8_t)log2(cci->payload_max);
438     *len_out = sizeof(*is_identify);
439     return CXL_MBOX_SUCCESS;
440 }
441 
442 /* CXL r3.1 section 8.2.9.1.3: Get Response Message Limit (Opcode 0003h) */
443 static CXLRetCode cmd_get_response_msg_limit(const struct cxl_cmd *cmd,
444                                              uint8_t *payload_in,
445                                              size_t len_in,
446                                              uint8_t *payload_out,
447                                              size_t *len_out,
448                                              CXLCCI *cci)
449 {
450     struct {
451         uint8_t rsp_limit;
452     } QEMU_PACKED *get_rsp_msg_limit = (void *)payload_out;
453     QEMU_BUILD_BUG_ON(sizeof(*get_rsp_msg_limit) != 1);
454 
455     get_rsp_msg_limit->rsp_limit = (uint8_t)log2(cci->payload_max);
456 
457     *len_out = sizeof(*get_rsp_msg_limit);
458     return CXL_MBOX_SUCCESS;
459 }
460 
461 /* CXL r3.1 section 8.2.9.1.4: Set Response Message Limit (Opcode 0004h) */
462 static CXLRetCode cmd_set_response_msg_limit(const struct cxl_cmd *cmd,
463                                              uint8_t *payload_in,
464                                              size_t len_in,
465                                              uint8_t *payload_out,
466                                              size_t *len_out,
467                                              CXLCCI *cci)
468 {
469     struct {
470         uint8_t rsp_limit;
471     } QEMU_PACKED *in = (void *)payload_in;
472     QEMU_BUILD_BUG_ON(sizeof(*in) != 1);
473     struct {
474         uint8_t rsp_limit;
475     } QEMU_PACKED *out = (void *)payload_out;
476     QEMU_BUILD_BUG_ON(sizeof(*out) != 1);
477 
478     if (in->rsp_limit < 8 || in->rsp_limit > 10) {
479         return CXL_MBOX_INVALID_INPUT;
480     }
481 
482     cci->payload_max = 1 << in->rsp_limit;
483     out->rsp_limit = in->rsp_limit;
484 
485     *len_out = sizeof(*out);
486     return CXL_MBOX_SUCCESS;
487 }
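/*
 * Worked example of the message limit encoding used above: the limit is log2
 * of the payload size in bytes, so a Set value of 8 gives
 * cci->payload_max = 1 << 8 = 256 bytes and 10 gives 1024 bytes; values
 * outside 8-10 are rejected, and Get reports log2(cci->payload_max) back.
 */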
488 
489 static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
490                                   void *private)
491 {
492     uint8_t *bm = private;
493     if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
494         uint8_t port = PCIE_PORT(d)->port;
495         bm[port / 8] |= 1 << (port % 8);
496     }
497 }
498 
499 /* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
500 static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
501                                              uint8_t *payload_in,
502                                              size_t len_in,
503                                              uint8_t *payload_out,
504                                              size_t *len_out,
505                                              CXLCCI *cci)
506 {
507     PCIEPort *usp = PCIE_PORT(cci->d);
508     PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
509     int num_phys_ports = pcie_count_ds_ports(bus);
510 
511     struct cxl_fmapi_ident_switch_dev_resp_pl {
512         uint8_t ingress_port_id;
513         uint8_t rsvd;
514         uint8_t num_physical_ports;
515         uint8_t num_vcss;
516         uint8_t active_port_bitmask[0x20];
517         uint8_t active_vcs_bitmask[0x20];
518         uint16_t total_vppbs;
519         uint16_t bound_vppbs;
520         uint8_t num_hdm_decoders_per_usp;
521     } QEMU_PACKED *out;
522     QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);
523 
524     out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
525     *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
526         .num_physical_ports = num_phys_ports + 1, /* 1 USP */
527         .num_vcss = 1, /* Don't yet support multiple VCSs - potentially tricky */
528         .active_vcs_bitmask[0] = 0x1,
529         .total_vppbs = num_phys_ports + 1,
530         .bound_vppbs = num_phys_ports + 1,
531         .num_hdm_decoders_per_usp = 4,
532     };
533 
534     /* Depends on the CCI type */
535     if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
536         out->ingress_port_id = PCIE_PORT(cci->intf)->port;
537     } else {
538         /* MCTP? */
539         out->ingress_port_id = 0;
540     }
541 
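    /*
     * active_port_bitmask has one bit per port: byte index port / 8, bit
     * index port % 8, so e.g. port 10 sets bit 2 of byte 1. DSPs are filled
     * in by cxl_set_dsp_active_bm(); the USP's own bit is set below.
     */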
542     pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
543                                   out->active_port_bitmask);
544     out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);
545 
546     *len_out = sizeof(*out);
547 
548     return CXL_MBOX_SUCCESS;
549 }
550 
551 /* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
552 static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
553                                               uint8_t *payload_in,
554                                               size_t len_in,
555                                               uint8_t *payload_out,
556                                               size_t *len_out,
557                                               CXLCCI *cci)
558 {
559     /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
560     struct cxl_fmapi_get_phys_port_state_req_pl {
561         uint8_t num_ports;
562         uint8_t ports[];
563     } QEMU_PACKED *in;
564 
565     /*
566      * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
567      * Format
568      */
569     struct cxl_fmapi_port_state_info_block {
570         uint8_t port_id;
571         uint8_t config_state;
572         uint8_t connected_device_cxl_version;
573         uint8_t rsv1;
574         uint8_t connected_device_type;
575         uint8_t port_cxl_version_bitmask;
576         uint8_t max_link_width;
577         uint8_t negotiated_link_width;
578         uint8_t supported_link_speeds_vector;
579         uint8_t max_link_speed;
580         uint8_t current_link_speed;
581         uint8_t ltssm_state;
582         uint8_t first_lane_num;
583         uint16_t link_state;
584         uint8_t supported_ld_count;
585     } QEMU_PACKED;
586 
587     /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
588     struct cxl_fmapi_get_phys_port_state_resp_pl {
589         uint8_t num_ports;
590         uint8_t rsv1[3];
591         struct cxl_fmapi_port_state_info_block ports[];
592     } QEMU_PACKED *out;
593     PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
594     PCIEPort *usp = PCIE_PORT(cci->d);
595     size_t pl_size;
596     int i;
597 
598     in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
599     out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;
600 
601     if (len_in < sizeof(*in)) {
602         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
603     }
604     /* Check if what was requested can fit */
605     if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
606         return CXL_MBOX_INVALID_INPUT;
607     }
608 
609     /* For success there should be a match for each requested port */
610     out->num_ports = in->num_ports;
611 
612     for (i = 0; i < in->num_ports; i++) {
613         struct cxl_fmapi_port_state_info_block *port;
614         /* First try to match on downstream port */
615         PCIDevice *port_dev;
616         uint16_t lnkcap, lnkcap2, lnksta;
617 
618         port = &out->ports[i];
619 
620         port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
621         if (port_dev) { /* DSP */
622             PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
623                 ->devices[0];
624             port->config_state = 3;
625             if (ds_dev) {
626                 if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
627                     port->connected_device_type = 5; /* Assume MLD for now */
628                 } else {
629                     port->connected_device_type = 1;
630                 }
631             } else {
632                 port->connected_device_type = 0;
633             }
634             port->supported_ld_count = 3;
635         } else if (usp->port == in->ports[i]) { /* USP */
636             port_dev = PCI_DEVICE(usp);
637             port->config_state = 4;
638             port->connected_device_type = 0;
639         } else {
640             return CXL_MBOX_INVALID_INPUT;
641         }
642 
643         port->port_id = in->ports[i];
644         /* Information on status of this port in lnksta, lnkcap */
645         if (!port_dev->exp.exp_cap) {
646             return CXL_MBOX_INTERNAL_ERROR;
647         }
648         lnksta = port_dev->config_read(port_dev,
649                                        port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
650                                        sizeof(lnksta));
651         lnkcap = port_dev->config_read(port_dev,
652                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
653                                        sizeof(lnkcap));
654         lnkcap2 = port_dev->config_read(port_dev,
655                                         port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
656                                         sizeof(lnkcap2));
657 
658         port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
659         port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
660         /* No definition for SLS field in linux/pci_regs.h */
661         port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
662         port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
663         port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
664         /* TODO: Track down if we can get the rest of the info */
665         port->ltssm_state = 0x7;
666         port->first_lane_num = 0;
667         port->link_state = 0;
668         port->port_cxl_version_bitmask = 0x2;
669         port->connected_device_cxl_version = 0x2;
670     }
671 
672     pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
673     *len_out = pl_size;
674 
675     return CXL_MBOX_SUCCESS;
676 }
677 
678 /* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
679 static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
680                                          uint8_t *payload_in,
681                                          size_t len_in,
682                                          uint8_t *payload_out,
683                                          size_t *len_out,
684                                          CXLCCI *cci)
685 {
686     struct {
687         uint8_t status;
688         uint8_t rsvd;
689         uint16_t opcode;
690         uint16_t returncode;
691         uint16_t vendor_ext_status;
692     } QEMU_PACKED *bg_op_status;
693     QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);
694 
695     bg_op_status = (void *)payload_out;
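    /*
     * Status byte layout: bit 0 flags a background operation in progress,
     * bits [7:1] hold the percentage complete, hence the shift by 1 below.
     */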
696     bg_op_status->status = cci->bg.complete_pct << 1;
697     if (cci->bg.runtime > 0) {
698         bg_op_status->status |= 1U << 0;
699     }
700     bg_op_status->opcode = cci->bg.opcode;
701     bg_op_status->returncode = cci->bg.ret_code;
702     *len_out = sizeof(*bg_op_status);
703 
704     return CXL_MBOX_SUCCESS;
705 }
706 
707 /*
708  * CXL r3.1 Section 8.2.9.1.5:
709  * Request Abort Background Operation (Opcode 0005h)
710  */
711 static CXLRetCode cmd_infostat_bg_op_abort(const struct cxl_cmd *cmd,
712                                            uint8_t *payload_in,
713                                            size_t len_in,
714                                            uint8_t *payload_out,
715                                            size_t *len_out,
716                                            CXLCCI *cci)
717 {
718     int bg_set = cci->bg.opcode >> 8;
719     int bg_cmd = cci->bg.opcode & 0xff;
720     const struct cxl_cmd *bg_c = &cci->cxl_cmd_set[bg_set][bg_cmd];
721 
722     if (!(bg_c->effect & CXL_MBOX_BACKGROUND_OPERATION_ABORT)) {
723         return CXL_MBOX_REQUEST_ABORT_NOTSUP;
724     }
725 
726     qemu_mutex_lock(&cci->bg.lock);
727     if (cci->bg.runtime) {
728         /* Only abort if the operation is not near completion; otherwise let it finish */
729         if (cci->bg.complete_pct < 85) {
730             timer_del(cci->bg.timer);
731             cci->bg.ret_code = CXL_MBOX_ABORTED;
732             cci->bg.starttime = 0;
733             cci->bg.runtime = 0;
734             cci->bg.aborted = true;
735         }
736     }
737     qemu_mutex_unlock(&cci->bg.lock);
738 
739     return CXL_MBOX_SUCCESS;
740 }
741 
742 #define CXL_FW_SLOTS 2
743 #define CXL_FW_SIZE  0x02000000 /* 32 MiB */
744 
745 /* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
746 static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
747                                                uint8_t *payload_in,
748                                                size_t len,
749                                                uint8_t *payload_out,
750                                                size_t *len_out,
751                                                CXLCCI *cci)
752 {
753     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
754     CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
755     struct {
756         uint8_t slots_supported;
757         uint8_t slot_info;
758         uint8_t caps;
759         uint8_t rsvd[0xd];
760         char fw_rev1[0x10];
761         char fw_rev2[0x10];
762         char fw_rev3[0x10];
763         char fw_rev4[0x10];
764     } QEMU_PACKED *fw_info;
765     QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);
766 
767     if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) ||
768         !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) ||
769         !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) {
770         return CXL_MBOX_INTERNAL_ERROR;
771     }
772 
773     fw_info = (void *)payload_out;
774 
775     fw_info->slots_supported = CXL_FW_SLOTS;
776     fw_info->slot_info = (cci->fw.active_slot & 0x7) |
777             ((cci->fw.staged_slot & 0x7) << 3);
778     fw_info->caps = BIT(0);  /* online update supported */
779 
780     if (cci->fw.slot[0]) {
781         pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
782     }
783     if (cci->fw.slot[1]) {
784         pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1");
785     }
786 
787     *len_out = sizeof(*fw_info);
788     return CXL_MBOX_SUCCESS;
789 }
790 
791 /* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */
792 #define CXL_FW_XFER_ALIGNMENT   128
793 
794 #define CXL_FW_XFER_ACTION_FULL     0x0
795 #define CXL_FW_XFER_ACTION_INIT     0x1
796 #define CXL_FW_XFER_ACTION_CONTINUE 0x2
797 #define CXL_FW_XFER_ACTION_END      0x3
798 #define CXL_FW_XFER_ACTION_ABORT    0x4
799 
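/*
 * Note: the offset field in the transfer payload is in units of
 * CXL_FW_XFER_ALIGNMENT (128 bytes), so for example an offset value of 4
 * addresses byte offset 512 of the firmware package; offset + length must
 * stay within CXL_FW_SIZE (32 MiB).
 */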
800 static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd,
801                                                uint8_t *payload_in,
802                                                size_t len,
803                                                uint8_t *payload_out,
804                                                size_t *len_out,
805                                                CXLCCI *cci)
806 {
807     struct {
808         uint8_t action;
809         uint8_t slot;
810         uint8_t rsvd1[2];
811         uint32_t offset;
812         uint8_t rsvd2[0x78];
813         uint8_t data[];
814     } QEMU_PACKED *fw_transfer = (void *)payload_in;
815     size_t offset, length;
816 
817     if (len < sizeof(*fw_transfer)) {
818         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
819     }
820 
821     if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) {
822         /*
823          * At this point there aren't any ongoing transfers running
824          * in the background - everything is serialized before this
825          * call. Just clear the transferring state and disregard any
826          * other input.
827          */
828         cci->fw.transferring = false;
829         return CXL_MBOX_SUCCESS;
830     }
831 
832     offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT;
833     length = len - sizeof(*fw_transfer);
834     if (offset + length > CXL_FW_SIZE) {
835         return CXL_MBOX_INVALID_INPUT;
836     }
837 
838     if (cci->fw.transferring) {
839         if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL ||
840             fw_transfer->action == CXL_FW_XFER_ACTION_INIT) {
841             return CXL_MBOX_FW_XFER_IN_PROGRESS;
842         }
843         /*
844          * Abort partitioned package transfer if over 30 secs
845          * between parts. As opposed to the explicit ABORT action,
846          * semantically treat this condition as an error - as
847          * if a part action were passed without a previous INIT.
848          */
849         if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) {
850             cci->fw.transferring = false;
851             return CXL_MBOX_INVALID_INPUT;
852         }
853     } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
854                fw_transfer->action == CXL_FW_XFER_ACTION_END) {
855         return CXL_MBOX_INVALID_INPUT;
856     }
857 
858     /* allow back-to-back retransmission */
859     if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) &&
860         (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
861          fw_transfer->action == CXL_FW_XFER_ACTION_END)) {
862         /* verify no overlaps */
863         if (offset < cci->fw.prev_offset + cci->fw.prev_len) {
864             return CXL_MBOX_FW_XFER_OUT_OF_ORDER;
865         }
866     }
867 
868     switch (fw_transfer->action) {
869     case CXL_FW_XFER_ACTION_FULL: /* ignores offset */
870     case CXL_FW_XFER_ACTION_END:
871         if (fw_transfer->slot == 0 ||
872             fw_transfer->slot == cci->fw.active_slot ||
873             fw_transfer->slot > CXL_FW_SLOTS) {
874             return CXL_MBOX_FW_INVALID_SLOT;
875         }
876 
877         /* mark the slot used upon bg completion */
878         break;
879     case CXL_FW_XFER_ACTION_INIT:
880         if (offset != 0) {
881             return CXL_MBOX_INVALID_INPUT;
882         }
883 
884         cci->fw.transferring = true;
885         cci->fw.prev_offset = offset;
886         cci->fw.prev_len = length;
887         break;
888     case CXL_FW_XFER_ACTION_CONTINUE:
889         cci->fw.prev_offset = offset;
890         cci->fw.prev_len = length;
891         break;
892     default:
893         return CXL_MBOX_INVALID_INPUT;
894     }
895 
896     if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) {
897         cci->bg.runtime = 10 * 1000UL;
898     } else {
899         cci->bg.runtime = 2 * 1000UL;
900     }
901     /* keep relevant context for bg completion */
902     cci->fw.curr_action = fw_transfer->action;
903     cci->fw.curr_slot = fw_transfer->slot;
904     *len_out = 0;
905 
906     return CXL_MBOX_BG_STARTED;
907 }
908 
909 static void __do_firmware_xfer(CXLCCI *cci)
910 {
911     switch (cci->fw.curr_action) {
912     case CXL_FW_XFER_ACTION_FULL:
913     case CXL_FW_XFER_ACTION_END:
914         cci->fw.slot[cci->fw.curr_slot - 1] = true;
915         cci->fw.transferring = false;
916         break;
917     case CXL_FW_XFER_ACTION_INIT:
918     case CXL_FW_XFER_ACTION_CONTINUE:
919         time(&cci->fw.last_partxfer);
920         break;
921     default:
922         break;
923     }
924 }
925 
926 /* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */
927 static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd,
928                                                uint8_t *payload_in,
929                                                size_t len,
930                                                uint8_t *payload_out,
931                                                size_t *len_out,
932                                                CXLCCI *cci)
933 {
934     struct {
935         uint8_t action;
936         uint8_t slot;
937     } QEMU_PACKED *fw_activate = (void *)payload_in;
938     QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2);
939 
940     if (fw_activate->slot == 0 ||
941         fw_activate->slot == cci->fw.active_slot ||
942         fw_activate->slot > CXL_FW_SLOTS) {
943         return CXL_MBOX_FW_INVALID_SLOT;
944     }
945 
946     /* ensure that an actual fw package is there */
947     if (!cci->fw.slot[fw_activate->slot - 1]) {
948         return CXL_MBOX_FW_INVALID_SLOT;
949     }
950 
951     switch (fw_activate->action) {
952     case 0: /* online */
953         cci->fw.active_slot = fw_activate->slot;
954         break;
955     case 1: /* reset */
956         cci->fw.staged_slot = fw_activate->slot;
957         break;
958     default:
959         return CXL_MBOX_INVALID_INPUT;
960     }
961 
962     return CXL_MBOX_SUCCESS;
963 }
964 
965 /* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
966 static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
967                                     uint8_t *payload_in,
968                                     size_t len_in,
969                                     uint8_t *payload_out,
970                                     size_t *len_out,
971                                     CXLCCI *cci)
972 {
973     CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
974     uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);
975 
976     stq_le_p(payload_out, final_time);
977     *len_out = 8;
978 
979     return CXL_MBOX_SUCCESS;
980 }
981 
982 /* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
983 static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
984                                     uint8_t *payload_in,
985                                     size_t len_in,
986                                     uint8_t *payload_out,
987                                     size_t *len_out,
988                                     CXLCCI *cci)
989 {
990     CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
991 
992     cxl_dstate->timestamp.set = true;
993     cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
994 
995     cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);
996 
997     *len_out = 0;
998     return CXL_MBOX_SUCCESS;
999 }
1000 
1001 /* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
1002 static const QemuUUID cel_uuid = {
1003     .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
1004                  0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
1005 };
1006 
1007 /* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
1008 static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
1009                                          uint8_t *payload_in,
1010                                          size_t len_in,
1011                                          uint8_t *payload_out,
1012                                          size_t *len_out,
1013                                          CXLCCI *cci)
1014 {
1015     struct {
1016         uint16_t entries;
1017         uint8_t rsvd[6];
1018         struct {
1019             QemuUUID uuid;
1020             uint32_t size;
1021         } log_entries[1];
1022     } QEMU_PACKED *supported_logs = (void *)payload_out;
1023     QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);
1024 
1025     supported_logs->entries = 1;
1026     supported_logs->log_entries[0].uuid = cel_uuid;
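    /* Each CEL entry is 4 bytes (2 byte opcode + 2 byte command effects) */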
1027     supported_logs->log_entries[0].size = 4 * cci->cel_size;
1028 
1029     *len_out = sizeof(*supported_logs);
1030     return CXL_MBOX_SUCCESS;
1031 }
1032 
1033 /* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
1034 static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
1035                                    uint8_t *payload_in,
1036                                    size_t len_in,
1037                                    uint8_t *payload_out,
1038                                    size_t *len_out,
1039                                    CXLCCI *cci)
1040 {
1041     struct {
1042         QemuUUID uuid;
1043         uint32_t offset;
1044         uint32_t length;
1045     } QEMU_PACKED QEMU_ALIGNED(16) *get_log;
1046 
1047     get_log = (void *)payload_in;
1048 
1049     if (get_log->length > cci->payload_max) {
1050         return CXL_MBOX_INVALID_INPUT;
1051     }
1052 
1053     if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
1054         return CXL_MBOX_INVALID_LOG;
1055     }
1056 
1057     /*
1058      * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
1059      *   The device shall return Invalid Input if the Offset or Length
1060      *   fields attempt to access beyond the size of the log as reported by Get
1061      *   Supported Logs.
1062      *
1063      * There is only ever one CEL entry per opcode, but if the inputs are not
1064      * valid the length + offset may still be greater than that and so access
1065      * beyond the end of cci->cel_log.
1066      */
1067     if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) {
1068         return CXL_MBOX_INVALID_INPUT;
1069     }
1070 
1071     /* Store off everything to local variables so we can wipe out the payload */
1072     *len_out = get_log->length;
1073 
1074     memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);
1075 
1076     return CXL_MBOX_SUCCESS;
1077 }
1078 
1079 /* CXL r3.1 section 8.2.9.6: Features */
1080 /*
1081  * Get Supported Features output payload
1082  * CXL r3.1 section 8.2.9.6.1 Table 8-96
1083  */
1084 typedef struct CXLSupportedFeatureHeader {
1085     uint16_t entries;
1086     uint16_t nsuppfeats_dev;
1087     uint32_t reserved;
1088 } QEMU_PACKED CXLSupportedFeatureHeader;
1089 
1090 /*
1091  * Get Supported Features Supported Feature Entry
1092  * CXL r3.1 section 8.2.9.6.1 Table 8-97
1093  */
1094 typedef struct CXLSupportedFeatureEntry {
1095     QemuUUID uuid;
1096     uint16_t feat_index;
1097     uint16_t get_feat_size;
1098     uint16_t set_feat_size;
1099     uint32_t attr_flags;
1100     uint8_t get_feat_version;
1101     uint8_t set_feat_version;
1102     uint16_t set_feat_effects;
1103     uint8_t rsvd[18];
1104 } QEMU_PACKED CXLSupportedFeatureEntry;
1105 
1106 /*
1107  * Get Supported Features Supported Feature Entry
1108  * CXL r3.1 section 8.2.9.6.1 Table 8-97
1109  */
1110 /* Supported Feature Entry : attribute flags */
1111 #define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0)
1112 #define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1)
1113 #define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4)
1114 #define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5)
1115 #define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6)
1116 
1117 /* Supported Feature Entry : set feature effects */
1118 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0)
1119 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1)
1120 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2)
1121 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3)
1122 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4)
1123 #define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5)
1124 #define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6)
1125 #define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7)
1126 #define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8)
1127 #define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9)
1128 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10)
1129 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11)
1130 
1131 enum CXL_SUPPORTED_FEATURES_LIST {
1132     CXL_FEATURE_PATROL_SCRUB = 0,
1133     CXL_FEATURE_ECS,
1134     CXL_FEATURE_MAX
1135 };
1136 
1137 /* CXL r3.1 section 8.2.9.6.2: Get Feature */
1138 /*
1139  * Get Feature input payload
1140  * CXL r3.1 section 8.2.9.6.2 Table 8-99
1141  */
1142 /* Get Feature : Payload in selection */
1143 enum CXL_GET_FEATURE_SELECTION {
1144     CXL_GET_FEATURE_SEL_CURRENT_VALUE,
1145     CXL_GET_FEATURE_SEL_DEFAULT_VALUE,
1146     CXL_GET_FEATURE_SEL_SAVED_VALUE,
1147     CXL_GET_FEATURE_SEL_MAX
1148 };
1149 
1150 /* CXL r3.1 section 8.2.9.6.3: Set Feature */
1151 /*
1152  * Set Feature input payload
1153  * CXL r3.1 section 8.2.9.6.3 Table 8-101
1154  */
1155 typedef struct CXLSetFeatureInHeader {
1156         QemuUUID uuid;
1157         uint32_t flags;
1158         uint16_t offset;
1159         uint8_t version;
1160         uint8_t rsvd[9];
1161 } QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader;
1162 
1163 /* Set Feature : Payload in flags */
1164 #define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK   0x7
1165 enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
1166     CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER,
1167     CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER,
1168     CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER,
1169     CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER,
1170     CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER,
1171     CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
1172 };
1173 #define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3)
1174 
1175 /* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
1176 static const QemuUUID patrol_scrub_uuid = {
1177     .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
1178                  0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
1179 };
1180 
1181 typedef struct CXLMemPatrolScrubSetFeature {
1182         CXLSetFeatureInHeader hdr;
1183         CXLMemPatrolScrubWriteAttrs feat_data;
1184 } QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;
1185 
1186 /*
1187  * CXL r3.1 section 8.2.9.9.11.2:
1188  * DDR5 Error Check Scrub (ECS) Control Feature
1189  */
1190 static const QemuUUID ecs_uuid = {
1191     .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba,
1192                  0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86)
1193 };
1194 
1195 typedef struct CXLMemECSSetFeature {
1196         CXLSetFeatureInHeader hdr;
1197         CXLMemECSWriteAttrs feat_data[];
1198 } QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature;
1199 
1200 /* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
1201 static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
1202                                              uint8_t *payload_in,
1203                                              size_t len_in,
1204                                              uint8_t *payload_out,
1205                                              size_t *len_out,
1206                                              CXLCCI *cci)
1207 {
1208     struct {
1209         uint32_t count;
1210         uint16_t start_index;
1211         uint16_t reserved;
1212     } QEMU_PACKED QEMU_ALIGNED(16) * get_feats_in = (void *)payload_in;
1213 
1214     struct {
1215         CXLSupportedFeatureHeader hdr;
1216         CXLSupportedFeatureEntry feat_entries[];
1217     } QEMU_PACKED QEMU_ALIGNED(16) * get_feats_out = (void *)payload_out;
1218     uint16_t index, req_entries;
1219     uint16_t entry;
1220 
1221     if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
1222         return CXL_MBOX_UNSUPPORTED;
1223     }
1224     if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
1225         get_feats_in->start_index >= CXL_FEATURE_MAX) {
1226         return CXL_MBOX_INVALID_INPUT;
1227     }
1228 
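    /*
     * count is in bytes; for example (illustrative numbers), a count of
     * 8 + 2 * sizeof(CXLSupportedFeatureEntry) requests at most two entries,
     * further capped by how many features remain after start_index.
     */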
1229     req_entries = (get_feats_in->count -
1230                    sizeof(CXLSupportedFeatureHeader)) /
1231                    sizeof(CXLSupportedFeatureEntry);
1232     req_entries = MIN(req_entries,
1233                       (CXL_FEATURE_MAX - get_feats_in->start_index));
1234 
1235     for (entry = 0, index = get_feats_in->start_index;
1236          entry < req_entries; index++) {
1237         switch (index) {
1238         case  CXL_FEATURE_PATROL_SCRUB:
1239             /* Fill supported feature entry for device patrol scrub control */
1240             get_feats_out->feat_entries[entry++] =
1241                            (struct CXLSupportedFeatureEntry) {
1242                 .uuid = patrol_scrub_uuid,
1243                 .feat_index = index,
1244                 .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs),
1245                 .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs),
1246                 .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
1247                 .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
1248                 .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
1249                 .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
1250                                     CXL_FEAT_ENTRY_SFE_CEL_VALID,
1251             };
1252             break;
1253         case  CXL_FEATURE_ECS:
1254             /* Fill supported feature entry for device DDR5 ECS control */
1255             get_feats_out->feat_entries[entry++] =
1256                          (struct CXLSupportedFeatureEntry) {
1257                 .uuid = ecs_uuid,
1258                 .feat_index = index,
1259                 .get_feat_size = sizeof(CXLMemECSReadAttrs),
1260                 .set_feat_size = sizeof(CXLMemECSWriteAttrs),
1261                 .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
1262                 .get_feat_version = CXL_ECS_GET_FEATURE_VERSION,
1263                 .set_feat_version = CXL_ECS_SET_FEATURE_VERSION,
1264                 .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
1265                                     CXL_FEAT_ENTRY_SFE_CEL_VALID,
1266             };
1267             break;
1268         default:
1269             __builtin_unreachable();
1270         }
1271     }
1272     get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX;
1273     get_feats_out->hdr.entries = req_entries;
1274     *len_out = sizeof(CXLSupportedFeatureHeader) +
1275                       req_entries * sizeof(CXLSupportedFeatureEntry);
1276 
1277     return CXL_MBOX_SUCCESS;
1278 }
1279 
1280 /* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */
1281 static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
1282                                            uint8_t *payload_in,
1283                                            size_t len_in,
1284                                            uint8_t *payload_out,
1285                                            size_t *len_out,
1286                                            CXLCCI *cci)
1287 {
1288     struct {
1289         QemuUUID uuid;
1290         uint16_t offset;
1291         uint16_t count;
1292         uint8_t selection;
1293     } QEMU_PACKED QEMU_ALIGNED(16) * get_feature;
1294     uint16_t bytes_to_copy = 0;
1295     CXLType3Dev *ct3d;
1296     CXLSetFeatureInfo *set_feat_info;
1297 
1298     if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
1299         return CXL_MBOX_UNSUPPORTED;
1300     }
1301 
1302     ct3d = CXL_TYPE3(cci->d);
1303     get_feature = (void *)payload_in;
1304 
1305     set_feat_info = &ct3d->set_feat_info;
1306     if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) {
1307         return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
1308     }
1309 
1310     if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) {
1311         return CXL_MBOX_UNSUPPORTED;
1312     }
1313     if (get_feature->offset + get_feature->count > cci->payload_max) {
1314         return CXL_MBOX_INVALID_INPUT;
1315     }
1316 
1317     if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
1318         if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) {
1319             return CXL_MBOX_INVALID_INPUT;
1320         }
1321         bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) -
1322                                              get_feature->offset;
1323         bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
1324         memcpy(payload_out,
1325                (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset,
1326                bytes_to_copy);
1327     } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) {
1328         if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) {
1329             return CXL_MBOX_INVALID_INPUT;
1330         }
1331         bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset;
1332         bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
1333         memcpy(payload_out,
1334                (uint8_t *)&ct3d->ecs_attrs + get_feature->offset,
1335                bytes_to_copy);
1336     } else {
1337         return CXL_MBOX_UNSUPPORTED;
1338     }
1339 
1340     *len_out = bytes_to_copy;
1341 
1342     return CXL_MBOX_SUCCESS;
1343 }
1344 
1345 /* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */
1346 static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
1347                                            uint8_t *payload_in,
1348                                            size_t len_in,
1349                                            uint8_t *payload_out,
1350                                            size_t *len_out,
1351                                            CXLCCI *cci)
1352 {
1353     CXLSetFeatureInHeader *hdr = (void *)payload_in;
1354     CXLMemPatrolScrubWriteAttrs *ps_write_attrs;
1355     CXLMemPatrolScrubSetFeature *ps_set_feature;
1356     CXLMemECSWriteAttrs *ecs_write_attrs;
1357     CXLMemECSSetFeature *ecs_set_feature;
1358     CXLSetFeatureInfo *set_feat_info;
1359     uint16_t bytes_to_copy = 0;
1360     uint8_t data_transfer_flag;
1361     CXLType3Dev *ct3d;
1362     uint16_t count;
1363 
1364     if (len_in < sizeof(*hdr)) {
1365         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
1366     }
1367 
1368     if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
1369         return CXL_MBOX_UNSUPPORTED;
1370     }
1371     ct3d = CXL_TYPE3(cci->d);
1372     set_feat_info = &ct3d->set_feat_info;
1373 
1374     if (!qemu_uuid_is_null(&set_feat_info->uuid) &&
1375         !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) {
1376         return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
1377     }
1378     if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) {
1379         set_feat_info->data_saved_across_reset = true;
1380     } else {
1381         set_feat_info->data_saved_across_reset = false;
1382     }
1383 
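    /*
     * Feature data may arrive as one full transfer or as a multi-part
     * sequence. An initiate transfer latches the feature UUID and resets
     * the staged data size; subsequent parts accumulate until a finish
     * (or full) transfer commits the values, or an abort discards them.
     */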
1384     data_transfer_flag =
1385               hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK;
1386     if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) {
1387         set_feat_info->uuid = hdr->uuid;
1388         set_feat_info->data_size = 0;
1389     }
1390     set_feat_info->data_transfer_flag = data_transfer_flag;
1391     set_feat_info->data_offset = hdr->offset;
1392     bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);
1393 
1394     if (bytes_to_copy == 0) {
1395         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
1396     }
1397 
1398     if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
1399         if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
1400             return CXL_MBOX_UNSUPPORTED;
1401         }
1402 
1403         ps_set_feature = (void *)payload_in;
1404         ps_write_attrs = &ps_set_feature->feat_data;
1405 
1406         if ((uint32_t)hdr->offset + bytes_to_copy >
1407             sizeof(ct3d->patrol_scrub_wr_attrs)) {
1408             return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
1409         }
1410         memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset,
1411                ps_write_attrs,
1412                bytes_to_copy);
1413         set_feat_info->data_size += bytes_to_copy;
1414 
1415         if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
1416             data_transfer_flag ==  CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
1417             ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF;
1418             ct3d->patrol_scrub_attrs.scrub_cycle |=
1419                           ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF;
1420             ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1;
1421             ct3d->patrol_scrub_attrs.scrub_flags |=
1422                           ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1;
1423         }
1424     } else if (qemu_uuid_is_equal(&hdr->uuid,
1425                                   &ecs_uuid)) {
1426         if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) {
1427             return CXL_MBOX_UNSUPPORTED;
1428         }
1429 
1430         ecs_set_feature = (void *)payload_in;
1431         ecs_write_attrs = ecs_set_feature->feat_data;
1432 
1433         if ((uint32_t)hdr->offset + bytes_to_copy >
1434             sizeof(ct3d->ecs_wr_attrs)) {
1435             return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
1436         }
1437         memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset,
1438                ecs_write_attrs,
1439                bytes_to_copy);
1440         set_feat_info->data_size += bytes_to_copy;
1441 
1442         if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
1443             data_transfer_flag ==  CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
1444             ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap;
1445             for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
1446                 ct3d->ecs_attrs.fru_attrs[count].ecs_config =
1447                         ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F;
1448             }
1449         }
1450     } else {
1451         return CXL_MBOX_UNSUPPORTED;
1452     }
1453 
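    /*
     * On a full, finish or abort transfer, drop the staged write data and
     * reset the in-progress state so a new Set Feature sequence can start.
     */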
1454     if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
1455         data_transfer_flag ==  CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER ||
1456         data_transfer_flag ==  CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) {
1457         memset(&set_feat_info->uuid, 0, sizeof(QemuUUID));
1458         if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
1459             memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size);
1460         } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
1461             memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size);
1462         }
1463         set_feat_info->data_transfer_flag = 0;
1464         set_feat_info->data_saved_across_reset = false;
1465         set_feat_info->data_offset = 0;
1466         set_feat_info->data_size = 0;
1467     }
1468 
1469     return CXL_MBOX_SUCCESS;
1470 }
1471 
1472 /* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
1473 static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
1474                                              uint8_t *payload_in,
1475                                              size_t len_in,
1476                                              uint8_t *payload_out,
1477                                              size_t *len_out,
1478                                              CXLCCI *cci)
1479 {
1480     struct {
1481         char fw_revision[0x10];
1482         uint64_t total_capacity;
1483         uint64_t volatile_capacity;
1484         uint64_t persistent_capacity;
1485         uint64_t partition_align;
1486         uint16_t info_event_log_size;
1487         uint16_t warning_event_log_size;
1488         uint16_t failure_event_log_size;
1489         uint16_t fatal_event_log_size;
1490         uint32_t lsa_size;
1491         uint8_t poison_list_max_mer[3];
1492         uint16_t inject_poison_limit;
1493         uint8_t poison_caps;
1494         uint8_t qos_telemetry_caps;
1495         uint16_t dc_event_log_size;
1496     } QEMU_PACKED *id;
1497     QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45);
1498     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
1499     CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
1500     CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
1501 
1502     if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
1503         (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
1504         (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
1505         return CXL_MBOX_INTERNAL_ERROR;
1506     }
1507 
1508     id = (void *)payload_out;
1509 
1510     snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);
1511 
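    /* Capacities are reported in units of CXL_CAPACITY_MULTIPLIER (256 MiB) */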
1512     stq_le_p(&id->total_capacity,
1513              cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER);
1514     stq_le_p(&id->persistent_capacity,
1515              cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
1516     stq_le_p(&id->volatile_capacity,
1517              cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
1518     stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
1519     /* 256 poison records */
1520     st24_le_p(id->poison_list_max_mer, 256);
1521     /* No limit - so limited by main poison record limit */
1522     stw_le_p(&id->inject_poison_limit, 0);
1523     stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE);
1524 
1525     *len_out = sizeof(*id);
1526     return CXL_MBOX_SUCCESS;
1527 }
1528 
1529 /* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
1530 static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
1531                                               uint8_t *payload_in,
1532                                               size_t len_in,
1533                                               uint8_t *payload_out,
1534                                               size_t *len_out,
1535                                               CXLCCI *cci)
1536 {
1537     CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
1538     struct {
1539         uint64_t active_vmem;
1540         uint64_t active_pmem;
1541         uint64_t next_vmem;
1542         uint64_t next_pmem;
1543     } QEMU_PACKED *part_info = (void *)payload_out;
1544     QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
1545     CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
1546 
1547     if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
1548         (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
1549         (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
1550         return CXL_MBOX_INTERNAL_ERROR;
1551     }
1552 
1553     stq_le_p(&part_info->active_vmem,
1554              cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
1555     /*
1556      * When both next_vmem and next_pmem are 0, there is no pending change to
1557      * partitioning.
1558      */
1559     stq_le_p(&part_info->next_vmem, 0);
1560     stq_le_p(&part_info->active_pmem,
1561              cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
1562     stq_le_p(&part_info->next_pmem, 0);
1563 
1564     *len_out = sizeof(*part_info);
1565     return CXL_MBOX_SUCCESS;
1566 }
1567 
1568 /* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
1569 static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
1570                                    uint8_t *payload_in,
1571                                    size_t len_in,
1572                                    uint8_t *payload_out,
1573                                    size_t *len_out,
1574                                    CXLCCI *cci)
1575 {
1576     struct {
1577         uint32_t offset;
1578         uint32_t length;
1579     } QEMU_PACKED *get_lsa;
1580     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
1581     CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
1582     uint64_t offset, length;
1583 
1584     get_lsa = (void *)payload_in;
1585     offset = get_lsa->offset;
1586     length = get_lsa->length;
1587 
1588     if (offset + length > cvc->get_lsa_size(ct3d)) {
1589         *len_out = 0;
1590         return CXL_MBOX_INVALID_INPUT;
1591     }
1592 
1593     *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
1594     return CXL_MBOX_SUCCESS;
1595 }
1596 
1597 /* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
1598 static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
1599                                    uint8_t *payload_in,
1600                                    size_t len_in,
1601                                    uint8_t *payload_out,
1602                                    size_t *len_out,
1603                                    CXLCCI *cci)
1604 {
1605     struct set_lsa_pl {
1606         uint32_t offset;
1607         uint32_t rsvd;
1608         uint8_t data[];
1609     } QEMU_PACKED;
1610     struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
1611     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
1612     CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
1613     const size_t hdr_len = offsetof(struct set_lsa_pl, data);
1614 
1615     *len_out = 0;
1616     if (len_in < hdr_len) {
1617         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
1618     }
1619 
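    /*
     * len_in still includes the header here, so compare against the LSA
     * size plus hdr_len; the header is stripped just below.
     */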
1620     if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
1621         return CXL_MBOX_INVALID_INPUT;
1622     }
1623     len_in -= hdr_len;
1624 
1625     cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
1626     return CXL_MBOX_SUCCESS;
1627 }
1628 
1629 /* CXL r3.2 Section 8.2.10.9.3.2 Get Alert Configuration (Opcode 4201h) */
1630 static CXLRetCode cmd_get_alert_config(const struct cxl_cmd *cmd,
1631                                        uint8_t *payload_in,
1632                                        size_t len_in,
1633                                        uint8_t *payload_out,
1634                                        size_t *len_out,
1635                                        CXLCCI *cci)
1636 {
1637     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
1638     CXLAlertConfig *out = (CXLAlertConfig *)payload_out;
1639 
1640     memcpy(out, &ct3d->alert_config, sizeof(ct3d->alert_config));
1641     *len_out = sizeof(ct3d->alert_config);
1642 
1643     return CXL_MBOX_SUCCESS;
1644 }
1645 
1646 /* CXL r3.2 Section 8.2.10.9.3.3 Set Alert Configuration (Opcode 4202h) */
1647 static CXLRetCode cmd_set_alert_config(const struct cxl_cmd *cmd,
1648                                        uint8_t *payload_in,
1649                                        size_t len_in,
1650                                        uint8_t *payload_out,
1651                                        size_t *len_out,
1652                                        CXLCCI *cci)
1653 {
1654     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
1655     CXLAlertConfig *alert_config = &ct3d->alert_config;
1656     struct {
1657         uint8_t valid_alert_actions;
1658         uint8_t enable_alert_actions;
1659         uint8_t life_used_warn_thresh;
1660         uint8_t rsvd;
1661         uint16_t over_temp_warn_thresh;
1662         uint16_t under_temp_warn_thresh;
1663         uint16_t cor_vmem_err_warn_thresh;
1664         uint16_t cor_pmem_err_warn_thresh;
1665     } QEMU_PACKED *in = (void *)payload_in;
1666 
1667     if (in->valid_alert_actions & CXL_ALERTS_LIFE_USED_WARN_THRESH) {
1668         /*
1669          * CXL r3.2 Table 8-149 The life used warning threshold shall be
1670          * less than the life used critical alert value.
1671          */
1672         if (in->life_used_warn_thresh >=
1673             alert_config->life_used_crit_alert_thresh) {
1674             return CXL_MBOX_INVALID_INPUT;
1675         }
1676         alert_config->life_used_warn_thresh = in->life_used_warn_thresh;
1677         alert_config->enable_alerts |= CXL_ALERTS_LIFE_USED_WARN_THRESH;
1678     }
1679 
1680     if (in->valid_alert_actions & CXL_ALERTS_OVER_TEMP_WARN_THRESH) {
1681         /*
1682          * CXL r3.2 Table 8-149 The Device Over-Temperature Warning Threshold
1683          * shall be less than the Device Over-Temperature Critical
1684          * Alert Threshold.
1685          */
1686         if (in->over_temp_warn_thresh >=
1687             alert_config->over_temp_crit_alert_thresh) {
1688             return CXL_MBOX_INVALID_INPUT;
1689         }
1690         alert_config->over_temp_warn_thresh = in->over_temp_warn_thresh;
1691         alert_config->enable_alerts |= CXL_ALERTS_OVER_TEMP_WARN_THRESH;
1692     }
1693 
1694     if (in->valid_alert_actions & CXL_ALERTS_UNDER_TEMP_WARN_THRESH) {
1695         /*
1696          * CXL r3.2 Table 8-149 The Device Under-Temperature Warning Threshold
1697          * shall be higher than the Device Under-Temperature Critical
1698          * Alert Threshold.
1699          */
1700         if (in->under_temp_warn_thresh <=
1701             alert_config->under_temp_crit_alert_thresh) {
1702             return CXL_MBOX_INVALID_INPUT;
1703         }
1704         alert_config->under_temp_warn_thresh = in->under_temp_warn_thresh;
1705         alert_config->enable_alerts |= CXL_ALERTS_UNDER_TEMP_WARN_THRESH;
1706     }
1707 
1708     if (in->valid_alert_actions & CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH) {
1709         alert_config->cor_vmem_err_warn_thresh = in->cor_vmem_err_warn_thresh;
1710         alert_config->enable_alerts |= CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH;
1711     }
1712 
1713     if (in->valid_alert_actions & CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH) {
1714         alert_config->cor_pmem_err_warn_thresh = in->cor_pmem_err_warn_thresh;
1715         alert_config->enable_alerts |= CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH;
1716     }
1717     return CXL_MBOX_SUCCESS;
1718 }
1719 
1720 /* Perform the actual device zeroing */
1721 static void __do_sanitization(CXLType3Dev *ct3d)
1722 {
1723     MemoryRegion *mr;
1724 
1725     if (ct3d->hostvmem) {
1726         mr = host_memory_backend_get_memory(ct3d->hostvmem);
1727         if (mr) {
1728             void *hostmem = memory_region_get_ram_ptr(mr);
1729             memset(hostmem, 0, memory_region_size(mr));
1730         }
1731     }
1732 
1733     if (ct3d->hostpmem) {
1734         mr = host_memory_backend_get_memory(ct3d->hostpmem);
1735         if (mr) {
1736             void *hostmem = memory_region_get_ram_ptr(mr);
1737             memset(hostmem, 0, memory_region_size(mr));
1738         }
1739     }
1740     if (ct3d->lsa) {
1741         mr = host_memory_backend_get_memory(ct3d->lsa);
1742         if (mr) {
1743             void *lsa = memory_region_get_ram_ptr(mr);
1744             memset(lsa, 0, memory_region_size(mr));
1745         }
1746     }
1747     cxl_discard_all_event_records(&ct3d->cxl_dstate);
1748 }
1749 
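/*
 * Rough emulation-only model of sanitize duration: scales in steps with
 * capacity (in MiB), capped at 4 hours.
 */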
1750 static int get_sanitize_duration(uint64_t total_mem)
1751 {
1752     int secs = 0;
1753 
1754     if (total_mem <= 512) {
1755         secs = 4;
1756     } else if (total_mem <= 1024) {
1757         secs = 8;
1758     } else if (total_mem <= 2 * 1024) {
1759         secs = 15;
1760     } else if (total_mem <= 4 * 1024) {
1761         secs = 30;
1762     } else if (total_mem <= 8 * 1024) {
1763         secs = 60;
1764     } else if (total_mem <= 16 * 1024) {
1765         secs = 2 * 60;
1766     } else if (total_mem <= 32 * 1024) {
1767         secs = 4 * 60;
1768     } else if (total_mem <= 64 * 1024) {
1769         secs = 8 * 60;
1770     } else if (total_mem <= 128 * 1024) {
1771         secs = 15 * 60;
1772     } else if (total_mem <= 256 * 1024) {
1773         secs = 30 * 60;
1774     } else if (total_mem <= 512 * 1024) {
1775         secs = 60 * 60;
1776     } else if (total_mem <= 1024 * 1024) {
1777         secs = 120 * 60;
1778     } else {
1779         secs = 240 * 60; /* max 4 hrs */
1780     }
1781 
1782     return secs;
1783 }
1784 
1785 /*
1786  * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
1787  *
1788  * Once the Sanitize command has started successfully, the device shall be
1789  * placed in the media disabled state. If the command fails or is interrupted
1790  * by a reset or power failure, it shall remain in the media disabled state
1791  * until a successful Sanitize command has been completed. During this state:
1792  *
1793  * 1. Memory writes to the device will have no effect, and all memory reads
1794  * will return random values (no user data returned, even for locations that
1795  * the failed Sanitize operation didn’t sanitize yet).
1796  *
1797  * 2. Mailbox commands shall still be processed in the disabled state, except
1798  * that commands that access Sanitized areas shall fail with the Media Disabled
1799  * error code.
1800  */
1801 static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
1802                                          uint8_t *payload_in,
1803                                          size_t len_in,
1804                                          uint8_t *payload_out,
1805                                          size_t *len_out,
1806                                          CXLCCI *cci)
1807 {
1808     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
1809     uint64_t total_mem; /* in MiB */
1810     int secs;
1811 
1812     total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
1813     secs = get_sanitize_duration(total_mem);
1814 
1815     /* Other background commands are busied out (EBUSY) for now */
1816     cci->bg.runtime = secs * 1000UL;
1817     *len_out = 0;
1818 
1819     cxl_dev_disable_media(&ct3d->cxl_dstate);
1820 
1821     /* the actual sanitization runs when the background operation completes */
1822     return CXL_MBOX_BG_STARTED;
1823 }
1824 
1825 struct dpa_range_list_entry {
1826     uint64_t starting_dpa;
1827     uint64_t length;
1828 } QEMU_PACKED;
1829 
1830 struct CXLSanitizeInfo {
1831     uint32_t dpa_range_count;
1832     uint8_t fill_value;
1833     struct dpa_range_list_entry dpa_range_list[];
1834 } QEMU_PACKED;
1835 
1836 static uint64_t get_vmr_size(CXLType3Dev *ct3d, MemoryRegion **vmr)
1837 {
1838     MemoryRegion *mr;
1839     if (ct3d->hostvmem) {
1840         mr = host_memory_backend_get_memory(ct3d->hostvmem);
1841         if (vmr) {
1842             *vmr = mr;
1843         }
1844         return memory_region_size(mr);
1845     }
1846     return 0;
1847 }
1848 
1849 static uint64_t get_pmr_size(CXLType3Dev *ct3d, MemoryRegion **pmr)
1850 {
1851     MemoryRegion *mr;
1852     if (ct3d->hostpmem) {
1853         mr = host_memory_backend_get_memory(ct3d->hostpmem);
1854         if (pmr) {
1855             *pmr = mr;
1856         }
1857         return memory_region_size(mr);
1858     }
1859     return 0;
1860 }
1861 
1862 static uint64_t get_dc_size(CXLType3Dev *ct3d, MemoryRegion **dc_mr)
1863 {
1864     MemoryRegion *mr;
1865     if (ct3d->dc.host_dc) {
1866         mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
1867         if (dc_mr) {
1868             *dc_mr = mr;
1869         }
1870         return memory_region_size(mr);
1871     }
1872     return 0;
1873 }
1874 
1875 static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr,
1876                              size_t length)
1877 {
1878     uint64_t vmr_size, pmr_size, dc_size, dpa_end;
1879 
1880     if ((dpa_addr % CXL_CACHE_LINE_SIZE) ||
1881         (length % CXL_CACHE_LINE_SIZE)  ||
1882         (length <= 0)) {
1883         return -EINVAL;
1884     }
1885 
1886     vmr_size = get_vmr_size(ct3d, NULL);
1887     pmr_size = get_pmr_size(ct3d, NULL);
1888     dc_size = get_dc_size(ct3d, NULL);
1889 
1890     /* guard against overflow in 64-bit values coming from the guest */
1891     if (uadd64_overflow(dpa_addr, length, &dpa_end)) {
1892         return -EINVAL;
1893     }
1894 
1895     if (dpa_end > vmr_size + pmr_size + dc_size) {
1896         return -EINVAL;
1897     }
1898 
1899     if (dpa_addr > vmr_size + pmr_size) {
1900         if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) {
1901             return -ENODEV;
1902         }
1903     }
1904 
1905     return 0;
1906 }
1907 
1908 static int sanitize_range(CXLType3Dev *ct3d, uint64_t dpa_addr, size_t length,
1909                           uint8_t fill_value)
1910 {
1911 
1912     uint64_t vmr_size, pmr_size;
1913     AddressSpace *as = NULL;
1914     MemTxAttrs mem_attrs = {};
1915 
1916     vmr_size = get_vmr_size(ct3d, NULL);
1917     pmr_size = get_pmr_size(ct3d, NULL);
1918 
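    /*
     * DPA space is laid out as volatile memory first, then persistent
     * memory, then dynamic capacity; pick the address space accordingly.
     */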
1919     if (dpa_addr < vmr_size) {
1920         as = &ct3d->hostvmem_as;
1921     } else if (dpa_addr < vmr_size + pmr_size) {
1922         as = &ct3d->hostpmem_as;
1923     } else {
1924         if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) {
1925             return -ENODEV;
1926         }
1927         as = &ct3d->dc.host_dc_as;
1928     }
1929 
1930     return address_space_set(as, dpa_addr, fill_value, length, mem_attrs);
1931 }
1932 
1933 /* Perform the actual device zeroing */
1934 static void __do_sanitize(CXLType3Dev *ct3d)
1935 {
1936     struct CXLSanitizeInfo  *san_info = ct3d->media_op_sanitize;
1937     int dpa_range_count = san_info->dpa_range_count;
1938     int rc = 0;
1939     int i;
1940 
1941     for (i = 0; i < dpa_range_count; i++) {
1942         rc = sanitize_range(ct3d, san_info->dpa_range_list[i].starting_dpa,
1943                             san_info->dpa_range_list[i].length,
1944                             san_info->fill_value);
1945         if (rc) {
1946             goto exit;
1947         }
1948     }
1949 exit:
1950     g_free(ct3d->media_op_sanitize);
1951     ct3d->media_op_sanitize = NULL;
1952     return;
1953 }
1954 
1955 enum {
1956     MEDIA_OP_CLASS_GENERAL  = 0x0,
1957         #define MEDIA_OP_GEN_SUBC_DISCOVERY 0x0
1958     MEDIA_OP_CLASS_SANITIZE = 0x1,
1959         #define MEDIA_OP_SAN_SUBC_SANITIZE 0x0
1960         #define MEDIA_OP_SAN_SUBC_ZERO 0x1
1961 };
1962 
1963 struct media_op_supported_list_entry {
1964     uint8_t media_op_class;
1965     uint8_t media_op_subclass;
1966 };
1967 
1968 struct media_op_discovery_out_pl {
1969     uint64_t dpa_range_granularity;
1970     uint16_t total_supported_operations;
1971     uint16_t num_of_supported_operations;
1972     struct media_op_supported_list_entry entry[];
1973 } QEMU_PACKED;
1974 
1975 static const struct media_op_supported_list_entry media_op_matrix[] = {
1976     { MEDIA_OP_CLASS_GENERAL, MEDIA_OP_GEN_SUBC_DISCOVERY },
1977     { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_SANITIZE },
1978     { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_ZERO },
1979 };
1980 
1981 static CXLRetCode media_operations_discovery(uint8_t *payload_in,
1982                                              size_t len_in,
1983                                              uint8_t *payload_out,
1984                                              size_t *len_out)
1985 {
1986     struct {
1987         uint8_t media_operation_class;
1988         uint8_t media_operation_subclass;
1989         uint8_t rsvd[2];
1990         uint32_t dpa_range_count;
1991         struct {
1992             uint16_t start_index;
1993             uint16_t num_ops;
1994         } discovery_osa;
1995     } QEMU_PACKED *media_op_in_disc_pl = (void *)payload_in;
1996     struct media_op_discovery_out_pl *media_out_pl =
1997         (struct media_op_discovery_out_pl *)payload_out;
1998     int num_ops, start_index, i;
1999     int count = 0;
2000 
2001     if (len_in < sizeof(*media_op_in_disc_pl)) {
2002         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
2003     }
2004 
2005     num_ops = media_op_in_disc_pl->discovery_osa.num_ops;
2006     start_index = media_op_in_disc_pl->discovery_osa.start_index;
2007 
2008     /*
2009      * Per CXL r3.2 section 8.2.10.9.5.3, dpa_range_count should be zero and
2010      * the start index should not exceed the total number of entries for the
2011      * discovery subclass command.
2012      */
2013     if (media_op_in_disc_pl->dpa_range_count ||
2014         start_index + num_ops > ARRAY_SIZE(media_op_matrix)) {
2015         return CXL_MBOX_INVALID_INPUT;
2016     }
2017 
2018     media_out_pl->dpa_range_granularity = CXL_CACHE_LINE_SIZE;
2019     media_out_pl->total_supported_operations =
2020                                      ARRAY_SIZE(media_op_matrix);
2021     if (num_ops > 0) {
2022         for (i = start_index; i < start_index + num_ops; i++) {
2023             media_out_pl->entry[count].media_op_class =
2024                     media_op_matrix[i].media_op_class;
2025             media_out_pl->entry[count].media_op_subclass =
2026                         media_op_matrix[i].media_op_subclass;
2027             count++;
2028             if (count == num_ops) {
2029                 break;
2030             }
2031         }
2032     }
2033 
2034     media_out_pl->num_of_supported_operations = count;
2035     *len_out = sizeof(*media_out_pl) + count * sizeof(*media_out_pl->entry);
2036     return CXL_MBOX_SUCCESS;
2037 }
2038 
2039 static CXLRetCode media_operations_sanitize(CXLType3Dev *ct3d,
2040                                             uint8_t *payload_in,
2041                                             size_t len_in,
2042                                             uint8_t *payload_out,
2043                                             size_t *len_out,
2044                                             uint8_t fill_value,
2045                                             CXLCCI *cci)
2046 {
2047     struct media_operations_sanitize {
2048         uint8_t media_operation_class;
2049         uint8_t media_operation_subclass;
2050         uint8_t rsvd[2];
2051         uint32_t dpa_range_count;
2052         struct dpa_range_list_entry dpa_range_list[];
2053     } QEMU_PACKED *media_op_in_sanitize_pl = (void *)payload_in;
2054     uint32_t dpa_range_count = media_op_in_sanitize_pl->dpa_range_count;
2055     uint64_t total_mem = 0;
2056     size_t dpa_range_list_size;
2057     int secs = 0, i;
2058 
2059     if (dpa_range_count == 0) {
2060         return CXL_MBOX_SUCCESS;
2061     }
2062 
2063     dpa_range_list_size = dpa_range_count * sizeof(struct dpa_range_list_entry);
2064     if (len_in < (sizeof(*media_op_in_sanitize_pl) + dpa_range_list_size)) {
2065         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
2066     }
2067 
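    /*
     * Validate every requested DPA range up front so the operation either
     * starts for all ranges or fails without touching any memory.
     */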
2068     for (i = 0; i < dpa_range_count; i++) {
2069         uint64_t start_dpa =
2070             media_op_in_sanitize_pl->dpa_range_list[i].starting_dpa;
2071         uint64_t length = media_op_in_sanitize_pl->dpa_range_list[i].length;
2072 
2073         if (validate_dpa_addr(ct3d, start_dpa, length)) {
2074             return CXL_MBOX_INVALID_INPUT;
2075         }
2076         total_mem += length;
2077     }
2078     ct3d->media_op_sanitize = g_malloc0(sizeof(struct CXLSanitizeInfo) +
2079                                         dpa_range_list_size);
2080 
2081     ct3d->media_op_sanitize->dpa_range_count = dpa_range_count;
2082     ct3d->media_op_sanitize->fill_value = fill_value;
2083     memcpy(ct3d->media_op_sanitize->dpa_range_list,
2084            media_op_in_sanitize_pl->dpa_range_list,
2085            dpa_range_list_size);
2086     secs = get_sanitize_duration(total_mem >> 20);
2087 
2088     /* Other background commands are busied out (EBUSY) for now */
2089     cci->bg.runtime = secs * 1000UL;
2090     *len_out = 0;
2091     /*
2092      * media op sanitize is targeted so no need to disable media or
2093      * clear event logs
2094      */
2095     return CXL_MBOX_BG_STARTED;
2096 }
2097 
2098 static CXLRetCode cmd_media_operations(const struct cxl_cmd *cmd,
2099                                        uint8_t *payload_in,
2100                                        size_t len_in,
2101                                        uint8_t *payload_out,
2102                                        size_t *len_out,
2103                                        CXLCCI *cci)
2104 {
2105     struct {
2106         uint8_t media_operation_class;
2107         uint8_t media_operation_subclass;
2108         uint8_t rsvd[2];
2109         uint32_t dpa_range_count;
2110     } QEMU_PACKED *media_op_in_common_pl = (void *)payload_in;
2111     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2112     uint8_t media_op_cl = 0;
2113     uint8_t media_op_subclass = 0;
2114 
2115     if (len_in < sizeof(*media_op_in_common_pl)) {
2116         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
2117     }
2118 
2119     media_op_cl = media_op_in_common_pl->media_operation_class;
2120     media_op_subclass = media_op_in_common_pl->media_operation_subclass;
2121 
2122     switch (media_op_cl) {
2123     case MEDIA_OP_CLASS_GENERAL:
2124         if (media_op_subclass != MEDIA_OP_GEN_SUBC_DISCOVERY) {
2125             return CXL_MBOX_UNSUPPORTED;
2126         }
2127 
2128         return media_operations_discovery(payload_in, len_in, payload_out,
2129                                              len_out);
2130     case MEDIA_OP_CLASS_SANITIZE:
2131         switch (media_op_subclass) {
2132         case MEDIA_OP_SAN_SUBC_SANITIZE:
2133             return media_operations_sanitize(ct3d, payload_in, len_in,
2134                                              payload_out, len_out, 0xF,
2135                                              cci);
2136         case MEDIA_OP_SAN_SUBC_ZERO:
2137             return media_operations_sanitize(ct3d, payload_in, len_in,
2138                                              payload_out, len_out, 0,
2139                                              cci);
2140         default:
2141             return CXL_MBOX_UNSUPPORTED;
2142         }
2143     default:
2144         return CXL_MBOX_UNSUPPORTED;
2145     }
2146 }
2147 
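/*
 * Get Security State: no security features are emulated, so always
 * report an all-zero security state.
 */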
2148 static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
2149                                          uint8_t *payload_in,
2150                                          size_t len_in,
2151                                          uint8_t *payload_out,
2152                                          size_t *len_out,
2153                                          CXLCCI *cci)
2154 {
2155     uint32_t *state = (uint32_t *)payload_out;
2156 
2157     *state = 0;
2158     *len_out = 4;
2159     return CXL_MBOX_SUCCESS;
2160 }
2161 
2162 /*
2163  * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h)
2164  *
2165  * This is very inefficient, but good enough for now!
2166  * Also the payload will always fit, so no need to handle the MORE flag and
2167  * make this stateful. We may want to allow longer poison lists to aid
2168  * testing of that kernel functionality.
2169  */
2170 static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
2171                                             uint8_t *payload_in,
2172                                             size_t len_in,
2173                                             uint8_t *payload_out,
2174                                             size_t *len_out,
2175                                             CXLCCI *cci)
2176 {
2177     struct get_poison_list_pl {
2178         uint64_t pa;
2179         uint64_t length;
2180     } QEMU_PACKED;
2181 
2182     struct get_poison_list_out_pl {
2183         uint8_t flags;
2184         uint8_t rsvd1;
2185         uint64_t overflow_timestamp;
2186         uint16_t count;
2187         uint8_t rsvd2[0x14];
2188         struct {
2189             uint64_t addr;
2190             uint32_t length;
2191             uint32_t resv;
2192         } QEMU_PACKED records[];
2193     } QEMU_PACKED;
2194 
2195     struct get_poison_list_pl *in = (void *)payload_in;
2196     struct get_poison_list_out_pl *out = (void *)payload_out;
2197     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2198     uint16_t record_count = 0, i = 0;
2199     uint64_t query_start, query_length;
2200     CXLPoisonList *poison_list = &ct3d->poison_list;
2201     CXLPoison *ent;
2202     uint16_t out_pl_len;
2203 
2204     query_start = ldq_le_p(&in->pa);
2205     /* 64 byte alignment required */
2206     if (query_start & 0x3f) {
2207         return CXL_MBOX_INVALID_INPUT;
2208     }
2209     query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;
2210 
2211     QLIST_FOREACH(ent, poison_list, node) {
2212         /* Check for no overlap */
2213         if (!ranges_overlap(ent->start, ent->length,
2214                             query_start, query_length)) {
2215             continue;
2216         }
2217         record_count++;
2218     }
2219     out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
2220     assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);
2221 
2222     QLIST_FOREACH(ent, poison_list, node) {
2223         uint64_t start, stop;
2224 
2225         /* Check for no overlap */
2226         if (!ranges_overlap(ent->start, ent->length,
2227                             query_start, query_length)) {
2228             continue;
2229         }
2230 
2231         /* Deal with overlap */
2232         start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
2233         stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
2234                    query_start + query_length);
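        /* Low 3 bits of the returned address encode the poison source type */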
2235         stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
2236         stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
2237         i++;
2238     }
2239     if (ct3d->poison_list_overflowed) {
2240         out->flags = (1 << 1);
2241         stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
2242     }
2243     if (scan_media_running(cci)) {
2244         out->flags |= (1 << 2);
2245     }
2246 
2247     stw_le_p(&out->count, record_count);
2248     *len_out = out_pl_len;
2249     return CXL_MBOX_SUCCESS;
2250 }
2251 
2252 /* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */
2253 static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
2254                                           uint8_t *payload_in,
2255                                           size_t len_in,
2256                                           uint8_t *payload_out,
2257                                           size_t *len_out,
2258                                           CXLCCI *cci)
2259 {
2260     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2261     CXLPoisonList *poison_list = &ct3d->poison_list;
2262     CXLPoison *ent;
2263     struct inject_poison_pl {
2264         uint64_t dpa;
2265     };
2266     struct inject_poison_pl *in = (void *)payload_in;
2267     uint64_t dpa = ldq_le_p(&in->dpa);
2268     CXLPoison *p;
2269 
2270     QLIST_FOREACH(ent, poison_list, node) {
2271         if (dpa >= ent->start &&
2272             dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
2273             return CXL_MBOX_SUCCESS;
2274         }
2275     }
2276     /*
2277      * Freeze the list if there is an on-going scan media operation.
2278      */
2279     if (scan_media_running(cci)) {
2280         /*
2281          * XXX: Spec is ambiguous - is this case considered
2282          * a successful return despite not adding to the list?
2283          */
2284         goto success;
2285     }
2286 
2287     if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
2288         return CXL_MBOX_INJECT_POISON_LIMIT;
2289     }
2290     p = g_new0(CXLPoison, 1);
2291 
2292     p->length = CXL_CACHE_LINE_SIZE;
2293     p->start = dpa;
2294     p->type = CXL_POISON_TYPE_INJECTED;
2295 
2296     /*
2297      * Possible todo: Merge with existing entry if next to it and if same type
2298      */
2299     QLIST_INSERT_HEAD(poison_list, p, node);
2300     ct3d->poison_list_cnt++;
2301 success:
2302     *len_out = 0;
2303 
2304     return CXL_MBOX_SUCCESS;
2305 }
2306 
2307 /* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */
2308 static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
2309                                          uint8_t *payload_in,
2310                                          size_t len_in,
2311                                          uint8_t *payload_out,
2312                                          size_t *len_out,
2313                                          CXLCCI *cci)
2314 {
2315     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2316     CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
2317     CXLPoisonList *poison_list = &ct3d->poison_list;
2318     CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
2319     struct clear_poison_pl {
2320         uint64_t dpa;
2321         uint8_t data[64];
2322     };
2323     CXLPoison *ent;
2324     uint64_t dpa;
2325 
2326     struct clear_poison_pl *in = (void *)payload_in;
2327 
2328     dpa = ldq_le_p(&in->dpa);
2329     if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size +
2330         ct3d->dc.total_capacity) {
2331         return CXL_MBOX_INVALID_PA;
2332     }
2333 
2334     /* Clearing a region with no poison is not an error so always do so */
2335     if (cvc->set_cacheline) {
2336         if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
2337             return CXL_MBOX_INTERNAL_ERROR;
2338         }
2339     }
2340 
2341     /*
2342      * Freeze the list if there is an on-going scan media operation.
2343      */
2344     if (scan_media_running(cci)) {
2345         /*
2346          * XXX: Spec is ambiguous - is this case considered
2347          * a successful return despite not removing from the list?
2348          */
2349         goto success;
2350     }
2351 
2352     QLIST_FOREACH(ent, poison_list, node) {
2353         /*
2354          * Test for contained in entry. Simpler than general case
2355          * as clearing 64 bytes and entries 64 byte aligned
2356          */
2357         if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
2358             break;
2359         }
2360     }
2361     if (!ent) {
2362         goto success;
2363     }
2364 
2365     QLIST_REMOVE(ent, node);
2366     ct3d->poison_list_cnt--;
2367 
2368     if (dpa > ent->start) {
2369         CXLPoison *frag;
2370         /* Cannot overflow as replacing existing entry */
2371 
2372         frag = g_new0(CXLPoison, 1);
2373 
2374         frag->start = ent->start;
2375         frag->length = dpa - ent->start;
2376         frag->type = ent->type;
2377 
2378         QLIST_INSERT_HEAD(poison_list, frag, node);
2379         ct3d->poison_list_cnt++;
2380     }
2381 
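    /*
     * If poison extends beyond the cleared cacheline, re-insert the
     * trailing fragment (or note overflow if the list is already full).
     */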
2382     if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
2383         CXLPoison *frag;
2384 
2385         if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
2386             cxl_set_poison_list_overflowed(ct3d);
2387         } else {
2388             frag = g_new0(CXLPoison, 1);
2389 
2390             frag->start = dpa + CXL_CACHE_LINE_SIZE;
2391             frag->length = ent->start + ent->length - frag->start;
2392             frag->type = ent->type;
2393             QLIST_INSERT_HEAD(poison_list, frag, node);
2394             ct3d->poison_list_cnt++;
2395         }
2396     }
2397     /* Any needed fragments have been added; free the original entry */
2398     g_free(ent);
2399 success:
2400     *len_out = 0;
2401 
2402     return CXL_MBOX_SUCCESS;
2403 }
2404 
2405 /*
2406  * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities
2407  */
2408 static CXLRetCode
2409 cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd,
2410                                       uint8_t *payload_in,
2411                                       size_t len_in,
2412                                       uint8_t *payload_out,
2413                                       size_t *len_out,
2414                                       CXLCCI *cci)
2415 {
2416     struct get_scan_media_capabilities_pl {
2417         uint64_t pa;
2418         uint64_t length;
2419     } QEMU_PACKED;
2420 
2421     struct get_scan_media_capabilities_out_pl {
2422         uint32_t estimated_runtime_ms;
2423     };
2424 
2425     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2426     CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
2427     struct get_scan_media_capabilities_pl *in = (void *)payload_in;
2428     struct get_scan_media_capabilities_out_pl *out = (void *)payload_out;
2429     uint64_t query_start;
2430     uint64_t query_length;
2431 
2432     query_start = ldq_le_p(&in->pa);
2433     /* 64 byte alignment required */
2434     if (query_start & 0x3f) {
2435         return CXL_MBOX_INVALID_INPUT;
2436     }
2437     query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;
2438 
2439     if (query_start + query_length > cxl_dstate->static_mem_size) {
2440         return CXL_MBOX_INVALID_PA;
2441     }
2442 
2443     /*
2444      * Just use 400 nanosecond access/read latency + 100 ns for
2445      * the cost of updating the poison list. For small enough
2446      * chunks return at least 1 ms.
2447      */
2448     stl_le_p(&out->estimated_runtime_ms,
2449              MAX(1, query_length * (0.0005L / 64)));
2450 
2451     *len_out = sizeof(*out);
2452     return CXL_MBOX_SUCCESS;
2453 }
2454 
2455 static void __do_scan_media(CXLType3Dev *ct3d)
2456 {
2457     CXLPoison *ent;
2458     unsigned int results_cnt = 0;
2459 
2460     QLIST_FOREACH(ent, &ct3d->scan_media_results, node) {
2461         results_cnt++;
2462     }
2463 
2464     /* only scan media may clear the overflow */
2465     if (ct3d->poison_list_overflowed &&
2466         ct3d->poison_list_cnt == results_cnt) {
2467         cxl_clear_poison_list_overflowed(ct3d);
2468     }
2469     /* scan media has run since last conventional reset */
2470     ct3d->scan_media_hasrun = true;
2471 }
2472 
2473 /*
2474  * CXL r3.1 section 8.2.9.9.4.5: Scan Media
2475  */
2476 static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd,
2477                                        uint8_t *payload_in,
2478                                        size_t len_in,
2479                                        uint8_t *payload_out,
2480                                        size_t *len_out,
2481                                        CXLCCI *cci)
2482 {
2483     struct scan_media_pl {
2484         uint64_t pa;
2485         uint64_t length;
2486         uint8_t flags;
2487     } QEMU_PACKED;
2488 
2489     struct scan_media_pl *in = (void *)payload_in;
2490     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2491     CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
2492     uint64_t query_start;
2493     uint64_t query_length;
2494     CXLPoison *ent, *next;
2495 
2496     query_start = ldq_le_p(&in->pa);
2497     /* 64 byte alignment required */
2498     if (query_start & 0x3f) {
2499         return CXL_MBOX_INVALID_INPUT;
2500     }
2501     query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;
2502 
2503     if (query_start + query_length > cxl_dstate->static_mem_size) {
2504         return CXL_MBOX_INVALID_PA;
2505     }
2506     if (ct3d->dc.num_regions && query_start + query_length >=
2507             cxl_dstate->static_mem_size + ct3d->dc.total_capacity) {
2508         return CXL_MBOX_INVALID_PA;
2509     }
2510 
2511     if (in->flags == 0) { /* TODO */
2512         qemu_log_mask(LOG_UNIMP,
2513                       "Scan Media Event Log is unsupported\n");
2514     }
2515 
2516     /* any previous results are discarded upon a new Scan Media */
2517     QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) {
2518         QLIST_REMOVE(ent, node);
2519         g_free(ent);
2520     }
2521 
2522     /* kill the poison list - it will be recreated */
2523     if (ct3d->poison_list_overflowed) {
2524         QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) {
2525             QLIST_REMOVE(ent, node);
2526             g_free(ent);
2527             ct3d->poison_list_cnt--;
2528         }
2529     }
2530 
2531     /*
2532      * Scan the backup list and move corresponding entries
2533      * into the results list, updating the poison list
2534      * when possible.
2535      */
2536     QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) {
2537         CXLPoison *res;
2538 
2539         if (ent->start >= query_start + query_length ||
2540             ent->start + ent->length <= query_start) {
2541             continue;
2542         }
2543 
2544         /*
2545          * If a Get Poison List cmd comes in while this
2546          * scan is being done, it will see the new complete
2547          * list, while setting the respective flag.
2548          */
2549         if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
2550             CXLPoison *p = g_new0(CXLPoison, 1);
2551 
2552             p->start = ent->start;
2553             p->length = ent->length;
2554             p->type = ent->type;
2555             QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
2556             ct3d->poison_list_cnt++;
2557         }
2558 
2559         res = g_new0(CXLPoison, 1);
2560         res->start = ent->start;
2561         res->length = ent->length;
2562         res->type = ent->type;
2563         QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node);
2564 
2565         QLIST_REMOVE(ent, node);
2566         g_free(ent);
2567     }
2568 
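    /*
     * Same cost model as Get Scan Media Capabilities: roughly 500 ns per
     * 64-byte cacheline, with a minimum runtime of 1 ms.
     */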
2569     cci->bg.runtime = MAX(1, query_length * (0.0005L / 64));
2570     *len_out = 0;
2571 
2572     return CXL_MBOX_BG_STARTED;
2573 }
2574 
2575 /*
2576  * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results
2577  */
2578 static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd,
2579                                                    uint8_t *payload_in,
2580                                                    size_t len_in,
2581                                                    uint8_t *payload_out,
2582                                                    size_t *len_out,
2583                                                    CXLCCI *cci)
2584 {
2585     struct get_scan_media_results_out_pl {
2586         uint64_t dpa_restart;
2587         uint64_t length;
2588         uint8_t flags;
2589         uint8_t rsvd1;
2590         uint16_t count;
2591         uint8_t rsvd2[0xc];
2592         struct {
2593             uint64_t addr;
2594             uint32_t length;
2595             uint32_t resv;
2596         } QEMU_PACKED records[];
2597     } QEMU_PACKED;
2598 
2599     struct get_scan_media_results_out_pl *out = (void *)payload_out;
2600     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2601     CXLPoisonList *scan_media_results = &ct3d->scan_media_results;
2602     CXLPoison *ent, *next;
2603     uint16_t total_count = 0, record_count = 0, i = 0;
2604     uint16_t out_pl_len;
2605 
2606     if (!ct3d->scan_media_hasrun) {
2607         return CXL_MBOX_UNSUPPORTED;
2608     }
2609 
2610     /*
2611      * Calculate limits; all entries lie within the address range of the
2612      * last Scan Media call.
2613      */
2614     QLIST_FOREACH(ent, scan_media_results, node) {
2615         size_t rec_size = record_count * sizeof(out->records[0]);
2616 
2617         if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) {
2618             record_count++;
2619         }
2620         total_count++;
2621     }
2622 
2623     out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
2624     assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);
2625 
2626     memset(out, 0, out_pl_len);
2627     QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) {
2628         uint64_t start, stop;
2629 
2630         if (i == record_count) {
2631             break;
2632         }
2633 
2634         start = ROUND_DOWN(ent->start, 64ull);
2635         stop = ROUND_DOWN(ent->start, 64ull) + ent->length;
2636         stq_le_p(&out->records[i].addr, start);
2637         stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
2638         i++;
2639 
2640         /* consume the returning entry */
2641         QLIST_REMOVE(ent, node);
2642         g_free(ent);
2643     }
2644 
2645     stw_le_p(&out->count, record_count);
2646     if (total_count > record_count) {
2647         out->flags = (1 << 0); /* More Media Error Records */
2648     }
2649 
2650     *len_out = out_pl_len;
2651     return CXL_MBOX_SUCCESS;
2652 }
2653 
2654 /*
2655  * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration
2656  * (Opcode: 4800h)
2657  */
2658 static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd,
2659                                              uint8_t *payload_in,
2660                                              size_t len_in,
2661                                              uint8_t *payload_out,
2662                                              size_t *len_out,
2663                                              CXLCCI *cci)
2664 {
2665     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2666     struct {
2667         uint8_t region_cnt;
2668         uint8_t start_rid;
2669     } QEMU_PACKED *in = (void *)payload_in;
2670     struct {
2671         uint8_t num_regions;
2672         uint8_t regions_returned;
2673         uint8_t rsvd1[6];
2674         struct {
2675             uint64_t base;
2676             uint64_t decode_len;
2677             uint64_t region_len;
2678             uint64_t block_size;
2679             uint32_t dsmadhandle;
2680             uint8_t flags;
2681             uint8_t rsvd2[3];
2682         } QEMU_PACKED records[];
2683     } QEMU_PACKED *out = (void *)payload_out;
2684     struct {
2685         uint32_t num_extents_supported;
2686         uint32_t num_extents_available;
2687         uint32_t num_tags_supported;
2688         uint32_t num_tags_available;
2689     } QEMU_PACKED *extra_out;
2690     uint16_t record_count;
2691     uint16_t i;
2692     uint16_t out_pl_len;
2693     uint8_t start_rid;
2694 
2695     start_rid = in->start_rid;
2696     if (start_rid >= ct3d->dc.num_regions) {
2697         return CXL_MBOX_INVALID_INPUT;
2698     }
2699 
2700     record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);
2701 
2702     out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
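    /*
     * The extent and tag counters are appended immediately after the
     * variable-length region record array.
     */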
2703     extra_out = (void *)(payload_out + out_pl_len);
2704     out_pl_len += sizeof(*extra_out);
2705     assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);
2706 
2707     out->num_regions = ct3d->dc.num_regions;
2708     out->regions_returned = record_count;
2709     for (i = 0; i < record_count; i++) {
2710         stq_le_p(&out->records[i].base,
2711                  ct3d->dc.regions[start_rid + i].base);
2712         stq_le_p(&out->records[i].decode_len,
2713                  ct3d->dc.regions[start_rid + i].decode_len /
2714                  CXL_CAPACITY_MULTIPLIER);
2715         stq_le_p(&out->records[i].region_len,
2716                  ct3d->dc.regions[start_rid + i].len);
2717         stq_le_p(&out->records[i].block_size,
2718                  ct3d->dc.regions[start_rid + i].block_size);
2719         stl_le_p(&out->records[i].dsmadhandle,
2720                  ct3d->dc.regions[start_rid + i].dsmadhandle);
2721         out->records[i].flags = ct3d->dc.regions[start_rid + i].flags;
2722     }
2723     /*
2724      * TODO: Assign values once extents and tags are introduced
2725      * to use.
2726      */
2727     stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
2728     stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
2729              ct3d->dc.total_extent_count);
2730     stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
2731     stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);
2732 
2733     *len_out = out_pl_len;
2734     return CXL_MBOX_SUCCESS;
2735 }
2736 
2737 /*
2738  * CXL r3.1 section 8.2.9.9.9.2:
2739  * Get Dynamic Capacity Extent List (Opcode 4801h)
2740  */
2741 static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
2742                                                uint8_t *payload_in,
2743                                                size_t len_in,
2744                                                uint8_t *payload_out,
2745                                                size_t *len_out,
2746                                                CXLCCI *cci)
2747 {
2748     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2749     struct {
2750         uint32_t extent_cnt;
2751         uint32_t start_extent_id;
2752     } QEMU_PACKED *in = (void *)payload_in;
2753     struct {
2754         uint32_t count;
2755         uint32_t total_extents;
2756         uint32_t generation_num;
2757         uint8_t rsvd[4];
2758         CXLDCExtentRaw records[];
2759     } QEMU_PACKED *out = (void *)payload_out;
2760     uint32_t start_extent_id = in->start_extent_id;
2761     CXLDCExtentList *extent_list = &ct3d->dc.extents;
2762     uint16_t record_count = 0, i = 0, record_done = 0;
2763     uint16_t out_pl_len, size;
2764     CXLDCExtent *ent;
2765 
2766     if (start_extent_id > ct3d->dc.nr_extents_accepted) {
2767         return CXL_MBOX_INVALID_INPUT;
2768     }
2769 
2770     record_count = MIN(in->extent_cnt,
2771                        ct3d->dc.nr_extents_accepted - start_extent_id);
2772     size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
2773     record_count = MIN(record_count, size / sizeof(out->records[0]));
2774     out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
2775 
2776     stl_le_p(&out->count, record_count);
2777     stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted);
2778     stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq);
2779 
2780     if (record_count > 0) {
2781         CXLDCExtentRaw *out_rec = &out->records[record_done];
2782 
2783         QTAILQ_FOREACH(ent, extent_list, node) {
2784             if (i++ < start_extent_id) {
2785                 continue;
2786             }
2787             stq_le_p(&out_rec->start_dpa, ent->start_dpa);
2788             stq_le_p(&out_rec->len, ent->len);
2789             memcpy(&out_rec->tag, ent->tag, 0x10);
2790             stw_le_p(&out_rec->shared_seq, ent->shared_seq);
2791 
2792             record_done++;
2793             out_rec++;
2794             if (record_done == record_count) {
2795                 break;
2796             }
2797         }
2798     }
2799 
2800     *len_out = out_pl_len;
2801     return CXL_MBOX_SUCCESS;
2802 }
2803 
2804 /*
2805  * Check whether any bit in addr within the range [nr, nr + size) is set;
2806  * return true if so, otherwise return false.
2807  */
2808 bool test_any_bits_set(const unsigned long *addr, unsigned long nr,
2809                               unsigned long size)
2810 {
2811     unsigned long res = find_next_bit(addr, size + nr, nr);
2812 
2813     return res < nr + size;
2814 }
2815 
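/*
 * Return the DC region containing the DPA range [dpa, dpa + len), or NULL if
 * the range lies outside dynamic capacity or crosses a region boundary.
 */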
2816 CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len)
2817 {
2818     int i;
2819     CXLDCRegion *region = &ct3d->dc.regions[0];
2820 
2821     if (dpa < region->base ||
2822         dpa >= region->base + ct3d->dc.total_capacity) {
2823         return NULL;
2824     }
2825 
2826     /*
2827      * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD)
2828      *
2829      * Regions are used in increasing-DPA order, with Region 0 being used for
2830      * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA.
2831      * So check from the last region to find where the dpa belongs. Extents that
2832      * cross multiple regions are not allowed.
2833      */
2834     for (i = ct3d->dc.num_regions - 1; i >= 0; i--) {
2835         region = &ct3d->dc.regions[i];
2836         if (dpa >= region->base) {
2837             if (dpa + len > region->base + region->len) {
2838                 return NULL;
2839             }
2840             return region;
2841         }
2842     }
2843 
2844     return NULL;
2845 }
2846 
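/*
 * Allocate a new extent covering [dpa, dpa + len) with the given tag and
 * shared sequence number, and append it to the tail of the extent list.
 */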
2847 void cxl_insert_extent_to_extent_list(CXLDCExtentList *list,
2848                                              uint64_t dpa,
2849                                              uint64_t len,
2850                                              uint8_t *tag,
2851                                              uint16_t shared_seq)
2852 {
2853     CXLDCExtent *extent;
2854 
2855     extent = g_new0(CXLDCExtent, 1);
2856     extent->start_dpa = dpa;
2857     extent->len = len;
2858     if (tag) {
2859         memcpy(extent->tag, tag, 0x10);
2860     }
2861     extent->shared_seq = shared_seq;
2862 
2863     QTAILQ_INSERT_TAIL(list, extent, node);
2864 }
2865 
2866 void cxl_remove_extent_from_extent_list(CXLDCExtentList *list,
2867                                         CXLDCExtent *extent)
2868 {
2869     QTAILQ_REMOVE(list, extent, node);
2870     g_free(extent);
2871 }
2872 
2873 /*
2874  * Add a new extent to the extent group if the group exists;
2875  * otherwise, create a new group first.
2876  * Return value: the extent group where the extent is inserted.
2877  */
2878 CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
2879                                                     uint64_t dpa,
2880                                                     uint64_t len,
2881                                                     uint8_t *tag,
2882                                                     uint16_t shared_seq)
2883 {
2884     if (!group) {
2885         group = g_new0(CXLDCExtentGroup, 1);
2886         QTAILQ_INIT(&group->list);
2887     }
2888     cxl_insert_extent_to_extent_list(&group->list, dpa, len,
2889                                      tag, shared_seq);
2890     return group;
2891 }
2892 
2893 void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
2894                                        CXLDCExtentGroup *group)
2895 {
2896     QTAILQ_INSERT_TAIL(list, group, node);
2897 }
2898 
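/*
 * Remove the first (oldest) extent group from the group list, free all the
 * extents it contains, and return the number of extents freed.
 */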
2899 uint32_t cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
2900 {
2901     CXLDCExtent *ent, *ent_next;
2902     CXLDCExtentGroup *group = QTAILQ_FIRST(list);
2903     uint32_t extents_deleted = 0;
2904 
2905     QTAILQ_REMOVE(list, group, node);
2906     QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
2907         cxl_remove_extent_from_extent_list(&group->list, ent);
2908         extents_deleted++;
2909     }
2910     g_free(group);
2911 
2912     return extents_deleted;
2913 }
2914 
2915 /*
2916  * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload
2917  * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload
2918  */
2919 typedef struct CXLUpdateDCExtentListInPl {
2920     uint32_t num_entries_updated;
2921     uint8_t flags;
2922     uint8_t rsvd[3];
2923     /* CXL r3.1 Table 8-169: Updated Extent */
2924     struct {
2925         uint64_t start_dpa;
2926         uint64_t len;
2927         uint8_t rsvd[8];
2928     } QEMU_PACKED updated_entries[];
2929 } QEMU_PACKED CXLUpdateDCExtentListInPl;
2930 
2931 /*
2932  * Check that the extents in the extent list are valid (example below):
2933  * 1. The extent should be in the range of a valid DC region;
2934  * 2. The extent should not cross multiple regions;
2935  * 3. The start DPA and the length of the extent should align with the block
2936  * size of the region;
2937  * 4. The address range of multiple extents in the list should not overlap.
2938  */
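/*
 * Illustrative example (hypothetical values, not taken from the spec): for a
 * single region with base 0x0, len 2 GiB and a 64 MiB block size, an entry
 * { start_dpa = 0x0, len = 128 MiB } passes all checks, while an entry
 * { start_dpa = 0x1000, len = 0x2000 } violates rule 3 (not aligned to the
 * block size), and two entries covering overlapping DPA ranges violate
 * rule 4.
 */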
2939 static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
2940         const CXLUpdateDCExtentListInPl *in)
2941 {
2942     uint64_t min_block_size = UINT64_MAX;
2943     CXLDCRegion *region;
2944     CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1];
2945     g_autofree unsigned long *blk_bitmap = NULL;
2946     uint64_t dpa, len;
2947     uint32_t i;
2948 
2949     for (i = 0; i < ct3d->dc.num_regions; i++) {
2950         region = &ct3d->dc.regions[i];
2951         min_block_size = MIN(min_block_size, region->block_size);
2952     }
2953 
2954     blk_bitmap = bitmap_new((lastregion->base + lastregion->len -
2955                              ct3d->dc.regions[0].base) / min_block_size);
2956 
2957     for (i = 0; i < in->num_entries_updated; i++) {
2958         dpa = in->updated_entries[i].start_dpa;
2959         len = in->updated_entries[i].len;
2960 
2961         region = cxl_find_dc_region(ct3d, dpa, len);
2962         if (!region) {
2963             return CXL_MBOX_INVALID_PA;
2964         }
2965 
2966         dpa -= ct3d->dc.regions[0].base;
2967         if (dpa % region->block_size || len % region->block_size) {
2968             return CXL_MBOX_INVALID_EXTENT_LIST;
2969         }
2970         /* The DPA range is already covered by other extents in the list */
2971         if (test_any_bits_set(blk_bitmap, dpa / min_block_size,
2972             len / min_block_size)) {
2973             return CXL_MBOX_INVALID_EXTENT_LIST;
2974         }
2975         bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size);
2976     }
2977 
2978     return CXL_MBOX_SUCCESS;
2979 }
2980 
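/*
 * Dry run of accepting an Add Dynamic Capacity Response: verify that every
 * extent in the payload is contained in the first pending extent group and
 * does not overlap any already accepted extent. Device state is unchanged.
 */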
2981 static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
2982         const CXLUpdateDCExtentListInPl *in)
2983 {
2984     uint32_t i;
2985     CXLDCExtent *ent;
2986     CXLDCExtentGroup *ext_group;
2987     uint64_t dpa, len;
2988     Range range1, range2;
2989 
2990     for (i = 0; i < in->num_entries_updated; i++) {
2991         dpa = in->updated_entries[i].start_dpa;
2992         len = in->updated_entries[i].len;
2993 
2994         range_init_nofail(&range1, dpa, len);
2995 
2996         /*
2997          * The host-accepted DPA range must be contained by the first extent
2998          * group in the pending list
2999          */
3000         ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending);
3001         if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) {
3002             return CXL_MBOX_INVALID_PA;
3003         }
3004 
3005         /* to-be-added range should not overlap with range already accepted */
3006         QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
3007             range_init_nofail(&range2, ent->start_dpa, ent->len);
3008             if (range_overlaps_range(&range1, &range2)) {
3009                 return CXL_MBOX_INVALID_PA;
3010             }
3011         }
3012     }
3013     return CXL_MBOX_SUCCESS;
3014 }
3015 
3016 /*
3017  * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h)
3018  * An extent is added to the extent list and becomes usable only after the
3019  * response is processed successfully.
3020  */
3021 static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
3022                                           uint8_t *payload_in,
3023                                           size_t len_in,
3024                                           uint8_t *payload_out,
3025                                           size_t *len_out,
3026                                           CXLCCI *cci)
3027 {
3028     CXLUpdateDCExtentListInPl *in = (void *)payload_in;
3029     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3030     CXLDCExtentList *extent_list = &ct3d->dc.extents;
3031     uint32_t i, num;
3032     uint64_t dpa, len;
3033     CXLRetCode ret;
3034 
3035     if (len_in < sizeof(*in)) {
3036         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
3037     }
3038 
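    /*
     * A response with zero updated entries means the host accepted none of
     * the offered extents, so drop the first pending extent group.
     */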
3039     if (in->num_entries_updated == 0) {
3040         num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
3041         ct3d->dc.total_extent_count -= num;
3042         return CXL_MBOX_SUCCESS;
3043     }
3044 
3045     if (len_in <
3046         sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
3047         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
3048     }
3049 
3050     /* Fail if adding extents would exceed the device's tracking ability. */
3051     if (in->num_entries_updated + ct3d->dc.total_extent_count >
3052         CXL_NUM_EXTENTS_SUPPORTED) {
3053         return CXL_MBOX_RESOURCES_EXHAUSTED;
3054     }
3055 
3056     ret = cxl_detect_malformed_extent_list(ct3d, in);
3057     if (ret != CXL_MBOX_SUCCESS) {
3058         return ret;
3059     }
3060 
3061     ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in);
3062     if (ret != CXL_MBOX_SUCCESS) {
3063         return ret;
3064     }
3065 
3066     for (i = 0; i < in->num_entries_updated; i++) {
3067         dpa = in->updated_entries[i].start_dpa;
3068         len = in->updated_entries[i].len;
3069 
3070         cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0);
3071         ct3d->dc.total_extent_count += 1;
3072         ct3d->dc.nr_extents_accepted += 1;
3073         ct3_set_region_block_backed(ct3d, dpa, len);
3074     }
3075     /* Remove the first extent group in the pending list */
3076     num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
3077     ct3d->dc.total_extent_count -= num;
3078 
3079     return CXL_MBOX_SUCCESS;
3080 }
3081 
3082 /*
3083  * Copy extent list from src to dst
3084  * Return value: number of extents copied
3085  */
3086 static uint32_t copy_extent_list(CXLDCExtentList *dst,
3087                                  const CXLDCExtentList *src)
3088 {
3089     uint32_t cnt = 0;
3090     CXLDCExtent *ent;
3091 
3092     if (!dst || !src) {
3093         return 0;
3094     }
3095 
3096     QTAILQ_FOREACH(ent, src, node) {
3097         cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len,
3098                                          ent->tag, ent->shared_seq);
3099         cnt++;
3100     }
3101     return cnt;
3102 }
3103 
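/*
 * Dry run of a release request: build updated_list as a copy of the accepted
 * extent list with the requested DPA ranges carved out (splitting extents
 * where necessary), and report the resulting extent count via
 * updated_list_size. On failure the temporary list is freed and emptied.
 */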
3104 static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d,
3105         const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list,
3106         uint32_t *updated_list_size)
3107 {
3108     CXLDCExtent *ent, *ent_next;
3109     uint64_t dpa, len;
3110     uint32_t i;
3111     int cnt_delta = 0;
3112     CXLRetCode ret = CXL_MBOX_SUCCESS;
3113 
3114     QTAILQ_INIT(updated_list);
3115     copy_extent_list(updated_list, &ct3d->dc.extents);
3116 
3117     for (i = 0; i < in->num_entries_updated; i++) {
3118         Range range;
3119 
3120         dpa = in->updated_entries[i].start_dpa;
3121         len = in->updated_entries[i].len;
3122 
3123         /* Fail if the DPA range is not fully backed by valid extents */
3124         if (!ct3_test_region_block_backed(ct3d, dpa, len)) {
3125             ret = CXL_MBOX_INVALID_PA;
3126             goto free_and_exit;
3127         }
3128 
3129         /* After this point, extent overflow is the only error that can happen */
3130         while (len > 0) {
3131             QTAILQ_FOREACH(ent, updated_list, node) {
3132                 range_init_nofail(&range, ent->start_dpa, ent->len);
3133 
3134                 if (range_contains(&range, dpa)) {
3135                     uint64_t len1, len2 = 0, len_done = 0;
3136                     uint64_t ent_start_dpa = ent->start_dpa;
3137                     uint64_t ent_len = ent->len;
3138 
3139                     len1 = dpa - ent->start_dpa;
3140                     /* Found the extent or the subset of an existing extent */
3141                     if (range_contains(&range, dpa + len - 1)) {
3142                         len2 = ent_start_dpa + ent_len - dpa - len;
3143                     } else {
3144                         dpa = ent_start_dpa + ent_len;
3145                     }
3146                     len_done = ent_len - len1 - len2;
3147 
3148                     cxl_remove_extent_from_extent_list(updated_list, ent);
3149                     cnt_delta--;
3150 
3151                     if (len1) {
3152                         cxl_insert_extent_to_extent_list(updated_list,
3153                                                          ent_start_dpa,
3154                                                          len1, NULL, 0);
3155                         cnt_delta++;
3156                     }
3157                     if (len2) {
3158                         cxl_insert_extent_to_extent_list(updated_list,
3159                                                          dpa + len,
3160                                                          len2, NULL, 0);
3161                         cnt_delta++;
3162                     }
3163 
3164                     if (cnt_delta + ct3d->dc.total_extent_count >
3165                             CXL_NUM_EXTENTS_SUPPORTED) {
3166                         ret = CXL_MBOX_RESOURCES_EXHAUSTED;
3167                         goto free_and_exit;
3168                     }
3169 
3170                     len -= len_done;
3171                     break;
3172                 }
3173             }
3174         }
3175     }
3176 free_and_exit:
3177     if (ret != CXL_MBOX_SUCCESS) {
3178         QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) {
3179             cxl_remove_extent_from_extent_list(updated_list, ent);
3180         }
3181         *updated_list_size = 0;
3182     } else {
3183         *updated_list_size = ct3d->dc.nr_extents_accepted + cnt_delta;
3184     }
3185 
3186     return ret;
3187 }
3188 
3189 /*
3190  * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h)
3191  */
3192 static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
3193                                           uint8_t *payload_in,
3194                                           size_t len_in,
3195                                           uint8_t *payload_out,
3196                                           size_t *len_out,
3197                                           CXLCCI *cci)
3198 {
3199     CXLUpdateDCExtentListInPl *in = (void *)payload_in;
3200     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3201     CXLDCExtentList updated_list;
3202     CXLDCExtent *ent, *ent_next;
3203     uint32_t updated_list_size;
3204     CXLRetCode ret;
3205 
3206     if (len_in < sizeof(*in)) {
3207         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
3208     }
3209 
3210     if (in->num_entries_updated == 0) {
3211         return CXL_MBOX_INVALID_INPUT;
3212     }
3213 
3214     if (len_in <
3215         sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
3216         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
3217     }
3218 
3219     ret = cxl_detect_malformed_extent_list(ct3d, in);
3220     if (ret != CXL_MBOX_SUCCESS) {
3221         return ret;
3222     }
3223 
3224     ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list,
3225                                         &updated_list_size);
3226     if (ret != CXL_MBOX_SUCCESS) {
3227         return ret;
3228     }
3229 
3230     /*
3231      * The dry run release passed, so updated_list is the new extent
3232      * list. Clear the extents in the accepted list, copy the extents
3233      * from updated_list into the accepted list, and update the extent
3234      * counts.
3235      */
3236     QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
3237         ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len);
3238         cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
3239     }
3240     copy_extent_list(&ct3d->dc.extents, &updated_list);
3241     QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) {
3242         ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len);
3243         cxl_remove_extent_from_extent_list(&updated_list, ent);
3244     }
3245     ct3d->dc.total_extent_count += (updated_list_size -
3246                                     ct3d->dc.nr_extents_accepted);
3247 
3248     ct3d->dc.nr_extents_accepted = updated_list_size;
3249 
3250     return CXL_MBOX_SUCCESS;
3251 }
3252 
3253 /* CXL r3.2 section 7.6.7.6.1: Get DCD Info (Opcode 5600h) */
3254 static CXLRetCode cmd_fm_get_dcd_info(const struct cxl_cmd *cmd,
3255                                       uint8_t *payload_in,
3256                                       size_t len_in,
3257                                       uint8_t *payload_out,
3258                                       size_t *len_out,
3259                                       CXLCCI *cci)
3260 {
3261     struct {
3262         uint8_t num_hosts;
3263         uint8_t num_regions_supported;
3264         uint8_t rsvd1[2];
3265         uint16_t supported_add_sel_policy_bitmask;
3266         uint8_t rsvd2[2];
3267         uint16_t supported_removal_policy_bitmask;
3268         uint8_t sanitize_on_release_bitmask;
3269         uint8_t rsvd3;
3270         uint64_t total_dynamic_capacity;
3271         uint64_t region_blk_size_bitmasks[8];
3272     } QEMU_PACKED *out = (void *)payload_out;
3273     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3274     CXLDCRegion *region;
3275     int i;
3276 
3277     out->num_hosts = 1;
3278     out->num_regions_supported = ct3d->dc.num_regions;
3279     stw_le_p(&out->supported_add_sel_policy_bitmask,
3280              BIT(CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE));
3281     stw_le_p(&out->supported_removal_policy_bitmask,
3282              BIT(CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE));
3283     out->sanitize_on_release_bitmask = 0;
3284 
3285     stq_le_p(&out->total_dynamic_capacity,
3286              ct3d->dc.total_capacity / CXL_CAPACITY_MULTIPLIER);
3287 
3288     for (i = 0; i < ct3d->dc.num_regions; i++) {
3289         region = &ct3d->dc.regions[i];
3290         memcpy(&out->region_blk_size_bitmasks[i],
3291                &region->supported_blk_size_bitmask,
3292                sizeof(out->region_blk_size_bitmasks[i]));
3293     }
3294 
3295     *len_out = sizeof(*out);
3296     return CXL_MBOX_SUCCESS;
3297 }
3298 
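/*
 * Encode the region attributes (non-volatile, sharable, HW-managed coherency,
 * IC-specific DC management, read-only) into the DSMAS flags byte.
 */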
3299 static void build_dsmas_flags(uint8_t *flags, CXLDCRegion *region)
3300 {
3301     *flags = 0;
3302 
3303     if (region->nonvolatile) {
3304         *flags |= BIT(CXL_DSMAS_FLAGS_NONVOLATILE);
3305     }
3306     if (region->sharable) {
3307         *flags |= BIT(CXL_DSMAS_FLAGS_SHARABLE);
3308     }
3309     if (region->hw_managed_coherency) {
3310         *flags |= BIT(CXL_DSMAS_FLAGS_HW_MANAGED_COHERENCY);
3311     }
3312     if (region->ic_specific_dc_management) {
3313         *flags |= BIT(CXL_DSMAS_FLAGS_IC_SPECIFIC_DC_MANAGEMENT);
3314     }
3315     if (region->rdonly) {
3316         *flags |= BIT(CXL_DSMAS_FLAGS_RDONLY);
3317     }
3318 }
3319 
3320 /*
3321  * CXL r3.2 section 7.6.7.6.2:
3322  * Get Host DC Region Configuration (Opcode 5601h)
3323  */
3324 static CXLRetCode cmd_fm_get_host_dc_region_config(const struct cxl_cmd *cmd,
3325                                                    uint8_t *payload_in,
3326                                                    size_t len_in,
3327                                                    uint8_t *payload_out,
3328                                                    size_t *len_out,
3329                                                    CXLCCI *cci)
3330 {
3331     struct {
3332         uint16_t host_id;
3333         uint8_t region_cnt;
3334         uint8_t start_rid;
3335     } QEMU_PACKED *in = (void *)payload_in;
3336     struct {
3337         uint16_t host_id;
3338         uint8_t num_regions;
3339         uint8_t regions_returned;
3340         struct {
3341             uint64_t base;
3342             uint64_t decode_len;
3343             uint64_t region_len;
3344             uint64_t block_size;
3345             uint8_t flags;
3346             uint8_t rsvd1[3];
3347             uint8_t sanitize;
3348             uint8_t rsvd2[3];
3349         } QEMU_PACKED records[];
3350     } QEMU_PACKED *out = (void *)payload_out;
3351     struct {
3352         uint32_t num_extents_supported;
3353         uint32_t num_extents_available;
3354         uint32_t num_tags_supported;
3355         uint32_t num_tags_available;
3356     } QEMU_PACKED *extra_out;
3357     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3358     uint16_t record_count, out_pl_len, i;
3359 
3360     if (in->start_rid >= ct3d->dc.num_regions) {
3361         return CXL_MBOX_INVALID_INPUT;
3362     }
3363     record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);
3364 
3365     out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
3366     extra_out = (void *)out + out_pl_len;
3367     out_pl_len += sizeof(*extra_out);
3368 
3369     assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);
3370 
3371     stw_le_p(&out->host_id, 0);
3372     out->num_regions = ct3d->dc.num_regions;
3373     out->regions_returned = record_count;
3374 
3375     for (i = 0; i < record_count; i++) {
3376         stq_le_p(&out->records[i].base,
3377                  ct3d->dc.regions[in->start_rid + i].base);
3378         stq_le_p(&out->records[i].decode_len,
3379                  ct3d->dc.regions[in->start_rid + i].decode_len /
3380                  CXL_CAPACITY_MULTIPLIER);
3381         stq_le_p(&out->records[i].region_len,
3382                  ct3d->dc.regions[in->start_rid + i].len);
3383         stq_le_p(&out->records[i].block_size,
3384                  ct3d->dc.regions[in->start_rid + i].block_size);
3385         build_dsmas_flags(&out->records[i].flags,
3386                           &ct3d->dc.regions[in->start_rid + i]);
3387         /* Sanitize is bit 0 of flags. */
3388         out->records[i].sanitize =
3389             ct3d->dc.regions[in->start_rid + i].flags & BIT(0);
3390     }
3391 
3392     stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
3393     stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
3394              ct3d->dc.total_extent_count);
3395     stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
3396     stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);
3397 
3398     *len_out = out_pl_len;
3399     return CXL_MBOX_SUCCESS;
3400 }
3401 
3402 /* CXL r3.2 section 7.6.7.6.3: Set DC Region Configuration (Opcode 5602h) */
3403 static CXLRetCode cmd_fm_set_dc_region_config(const struct cxl_cmd *cmd,
3404                                               uint8_t *payload_in,
3405                                               size_t len_in,
3406                                               uint8_t *payload_out,
3407                                               size_t *len_out,
3408                                               CXLCCI *cci)
3409 {
3410     struct {
3411         uint8_t reg_id;
3412         uint8_t rsvd[3];
3413         uint64_t block_sz;
3414         uint8_t flags;
3415         uint8_t rsvd2[3];
3416     } QEMU_PACKED *in = (void *)payload_in;
3417     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3418     CXLEventDynamicCapacity dcEvent = {};
3419     CXLDCRegion *region;
3420 
3421     if (in->reg_id >= DCD_MAX_NUM_REGION) {
3422         return CXL_MBOX_UNSUPPORTED;
3423     }
3424     region = &ct3d->dc.regions[in->reg_id];
3425 
3426     /*
3427      * CXL r3.2 7.6.7.6.3: Set DC Region Configuration
3428      * This command shall fail with Unsupported when the Sanitize on Release
3429      * field does not match the region's configuration... and the device
3430      * does not support reconfiguration of the Sanitize on Release setting.
3431      * Currently not reconfigurable, so always fail if the sanitize bit
3432      * (bit 0) doesn't match.
3433      */
3434     if ((in->flags & 0x1) != (region->flags & 0x1)) {
3435         return CXL_MBOX_UNSUPPORTED;
3436     }
3437 
3438     /* Check that no extents are in the region being reconfigured */
3439     if (!bitmap_empty(region->blk_bitmap, region->len / region->block_size)) {
3440         return CXL_MBOX_UNSUPPORTED;
3441     }
3442 
3443     /* Check that new block size is supported */
3444     if (!is_power_of_2(in->block_sz) ||
3445         !(in->block_sz & region->supported_blk_size_bitmask)) {
3446         return CXL_MBOX_INVALID_INPUT;
3447     }
3448 
3449     /* Return success if new block size == current block size */
3450     if (in->block_sz == region->block_size) {
3451         return CXL_MBOX_SUCCESS;
3452     }
3453 
3454     /* Free bitmap and create new one for new block size. */
3455     qemu_mutex_lock(&region->bitmap_lock);
3456     g_free(region->blk_bitmap);
3457     region->blk_bitmap = bitmap_new(region->len / in->block_sz);
3458     qemu_mutex_unlock(&region->bitmap_lock);
3459     region->block_size = in->block_sz;
3460 
3461     /* Create event record and insert into event log */
3462     cxl_assign_event_header(&dcEvent.hdr,
3463                             &dynamic_capacity_uuid,
3464                             (1 << CXL_EVENT_TYPE_INFO),
3465                             sizeof(dcEvent),
3466                             cxl_device_get_timestamp(&ct3d->cxl_dstate));
3467     dcEvent.type = DC_EVENT_REGION_CONFIG_UPDATED;
3468     dcEvent.validity_flags = 1;
3469     dcEvent.host_id = 0;
3470     dcEvent.updated_region_id = in->reg_id;
3471 
3472     if (cxl_event_insert(&ct3d->cxl_dstate,
3473                          CXL_EVENT_TYPE_DYNAMIC_CAP,
3474                          (CXLEventRecordRaw *)&dcEvent)) {
3475         cxl_event_irq_assert(ct3d);
3476     }
3477     return CXL_MBOX_SUCCESS;
3478 }
3479 
3480 /* CXL r3.2 section 7.6.7.6.4: Get DC Region Extent Lists (Opcode 5603h) */
3481 static CXLRetCode cmd_fm_get_dc_region_extent_list(const struct cxl_cmd *cmd,
3482                                                    uint8_t *payload_in,
3483                                                    size_t len_in,
3484                                                    uint8_t *payload_out,
3485                                                    size_t *len_out,
3486                                                    CXLCCI *cci)
3487 {
3488     struct {
3489         uint16_t host_id;
3490         uint8_t rsvd[2];
3491         uint32_t extent_cnt;
3492         uint32_t start_extent_id;
3493     } QEMU_PACKED *in = (void *)payload_in;
3494     struct {
3495         uint16_t host_id;
3496         uint8_t rsvd[2];
3497         uint32_t start_extent_id;
3498         uint32_t extents_returned;
3499         uint32_t total_extents;
3500         uint32_t list_generation_num;
3501         uint8_t rsvd2[4];
3502         CXLDCExtentRaw records[];
3503     } QEMU_PACKED *out = (void *)payload_out;
3504     QEMU_BUILD_BUG_ON(sizeof(*in) != 0xc);
3505     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3506     CXLDCExtent *ent;
3507     CXLDCExtentRaw *out_rec;
3508     uint16_t record_count = 0, record_done = 0, i = 0;
3509     uint16_t out_pl_len, max_size;
3510 
3511     if (in->host_id != 0) {
3512         return CXL_MBOX_INVALID_INPUT;
3513     }
3514 
3515     if (in->start_extent_id > ct3d->dc.nr_extents_accepted) {
3516         return CXL_MBOX_INVALID_INPUT;
3517     }
3518 
3519     record_count = MIN(in->extent_cnt,
3520                        ct3d->dc.nr_extents_accepted - in->start_extent_id);
3521     max_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
3522     record_count = MIN(record_count, max_size / sizeof(out->records[0]));
3523     out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
3524 
3525     stw_le_p(&out->host_id, in->host_id);
3526     stl_le_p(&out->start_extent_id, in->start_extent_id);
3527     stl_le_p(&out->extents_returned, record_count);
3528     stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted);
3529     stl_le_p(&out->list_generation_num, ct3d->dc.ext_list_gen_seq);
3530 
3531     if (record_count > 0) {
3532         QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
3533             if (i++ < in->start_extent_id) {
3534                 continue;
3535             }
3536             out_rec = &out->records[record_done];
3537             stq_le_p(&out_rec->start_dpa, ent->start_dpa);
3538             stq_le_p(&out_rec->len, ent->len);
3539             memcpy(&out_rec->tag, ent->tag, 0x10);
3540             stw_le_p(&out_rec->shared_seq, ent->shared_seq);
3541 
3542             record_done++;
3543             if (record_done == record_count) {
3544                 break;
3545             }
3546         }
3547     }
3548 
3549     *len_out = out_pl_len;
3550     return CXL_MBOX_SUCCESS;
3551 }
3552 
3553 /*
3554  * Helper function to convert CXLDCExtentRaw to CXLUpdateDCExtentListInPl
3555  * in order to reuse the cxl_detect_malformed_extent_list() function, which
3556  * accepts CXLUpdateDCExtentListInPl as a parameter.
3557  */
3558 static void convert_raw_extents(CXLDCExtentRaw raw_extents[],
3559                                 CXLUpdateDCExtentListInPl *extent_list,
3560                                 int count)
3561 {
3562     int i;
3563 
3564     extent_list->num_entries_updated = count;
3565 
3566     for (i = 0; i < count; i++) {
3567         extent_list->updated_entries[i].start_dpa = raw_extents[i].start_dpa;
3568         extent_list->updated_entries[i].len = raw_extents[i].len;
3569     }
3570 }
3571 
3572 /* CXL r3.2 Section 7.6.7.6.5: Initiate Dynamic Capacity Add (Opcode 5604h) */
3573 static CXLRetCode cmd_fm_initiate_dc_add(const struct cxl_cmd *cmd,
3574                                          uint8_t *payload_in,
3575                                          size_t len_in,
3576                                          uint8_t *payload_out,
3577                                          size_t *len_out,
3578                                          CXLCCI *cci)
3579 {
3580     struct {
3581         uint16_t host_id;
3582         uint8_t selection_policy;
3583         uint8_t reg_num;
3584         uint64_t length;
3585         uint8_t tag[0x10];
3586         uint32_t ext_count;
3587         CXLDCExtentRaw extents[];
3588     } QEMU_PACKED *in = (void *)payload_in;
3589     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3590     int i, rc;
3591 
3592     switch (in->selection_policy) {
3593         case CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE: {
3594             /* Fail if adding extents exceeds the extent tracking limit. */
3595             if (in->ext_count + ct3d->dc.total_extent_count >
3596                 CXL_NUM_EXTENTS_SUPPORTED) {
3597                 return CXL_MBOX_RESOURCES_EXHAUSTED;
3598             }
3599 
3600             g_autofree CXLUpdateDCExtentListInPl *list =
3601                 g_malloc0(sizeof(*list) +
3602                     in->ext_count * sizeof(*list->updated_entries));
3603 
3604             convert_raw_extents(in->extents, list, in->ext_count);
3605             rc = cxl_detect_malformed_extent_list(ct3d, list);
3606 
3607             for (i = 0; i < in->ext_count; i++) {
3608                 CXLDCExtentRaw *ext = &in->extents[i];
3609 
3610                 /* Check requested extents do not overlap with pending ones. */
3611                 if (cxl_extent_groups_overlaps_dpa_range(&ct3d->dc.extents_pending,
3612                                                          ext->start_dpa,
3613                                                          ext->len)) {
3614                     return CXL_MBOX_INVALID_EXTENT_LIST;
3615                 }
3616                 /* Check requested extents do not overlap with existing ones. */
3617                 if (cxl_extents_overlaps_dpa_range(&ct3d->dc.extents,
3618                                                    ext->start_dpa,
3619                                                    ext->len)) {
3620                     return CXL_MBOX_INVALID_EXTENT_LIST;
3621                 }
3622             }
3623 
3624             if (rc) {
3625                 return rc;
3626             }
3627 
3628             CXLDCExtentGroup *group = NULL;
3629             for (i = 0; i < in->ext_count; i++) {
3630                 CXLDCExtentRaw *ext = &in->extents[i];
3631 
3632                 group = cxl_insert_extent_to_extent_group(group, ext->start_dpa,
3633                                                           ext->len, ext->tag,
3634                                                           ext->shared_seq);
3635             }
3636 
3637             cxl_extent_group_list_insert_tail(&ct3d->dc.extents_pending, group);
3638             ct3d->dc.total_extent_count += in->ext_count;
3639             cxl_create_dc_event_records_for_extents(ct3d,
3640                                                     DC_EVENT_ADD_CAPACITY,
3641                                                     in->extents,
3642                                                     in->ext_count);
3643 
3644             return CXL_MBOX_SUCCESS;
3645         }
3646         default: {
3647             qemu_log_mask(LOG_UNIMP,
3648                           "CXL extent selection policy not supported.\n");
3649             return CXL_MBOX_INVALID_INPUT;
3650         }
3651     }
3652 }
3653 
3654 #define CXL_EXTENT_REMOVAL_POLICY_MASK 0x0F
3655 #define CXL_FORCED_REMOVAL_MASK (1 << 4)
3656 /*
3657  * CXL r3.2 Section 7.6.7.6.6:
3658  * Initiate Dynamic Capacity Release (Opcode 5605h)
3659  */
3660 static CXLRetCode cmd_fm_initiate_dc_release(const struct cxl_cmd *cmd,
3661                                              uint8_t *payload_in,
3662                                              size_t len_in,
3663                                              uint8_t *payload_out,
3664                                              size_t *len_out,
3665                                              CXLCCI *cci)
3666 {
3667     struct {
3668         uint16_t host_id;
3669         uint8_t flags;
3670         uint8_t reg_num;
3671         uint64_t length;
3672         uint8_t tag[0x10];
3673         uint32_t ext_count;
3674         CXLDCExtentRaw extents[];
3675     } QEMU_PACKED *in = (void *)payload_in;
3676     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3677     int i, rc;
3678 
3679     switch (in->flags & CXL_EXTENT_REMOVAL_POLICY_MASK) {
3680         case CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE: {
3681             CXLDCExtentList updated_list;
3682             uint32_t updated_list_size;
3683             g_autofree CXLUpdateDCExtentListInPl *list =
3684                 g_malloc0(sizeof(*list) +
3685                     in->ext_count * sizeof(*list->updated_entries));
3686 
3687             convert_raw_extents(in->extents, list, in->ext_count);
3688             rc = cxl_detect_malformed_extent_list(ct3d, list);
3689             if (rc) {
3690                 return rc;
3691             }
3692 
3693             /*
3694              * Fail with Invalid PA if an extent is pending and the Forced
3695              * Removal flag is not set.
3696              */
3697             if (!(in->flags & CXL_FORCED_REMOVAL_MASK)) {
3698                 for (i = 0; i < in->ext_count; i++) {
3699                     CXLDCExtentRaw ext = in->extents[i];
3700                     /*
3701                      * Check requested extents don't overlap with pending
3702                      * extents.
3703                      */
3704                     if (cxl_extent_groups_overlaps_dpa_range(
3705                             &ct3d->dc.extents_pending,
3706                             ext.start_dpa,
3707                             ext.len)) {
3708                         return CXL_MBOX_INVALID_PA;
3709                     }
3710                 }
3711             }
3712 
3713             rc = cxl_dc_extent_release_dry_run(ct3d,
3714                                                list,
3715                                                &updated_list,
3716                                                &updated_list_size);
3717             if (rc) {
3718                 return rc;
3719             }
3720             cxl_create_dc_event_records_for_extents(ct3d,
3721                                                     DC_EVENT_RELEASE_CAPACITY,
3722                                                     in->extents,
3723                                                     in->ext_count);
3724             return CXL_MBOX_SUCCESS;
3725         }
3726         default: {
3727             qemu_log_mask(LOG_UNIMP,
3728                 "CXL extent removal policy not supported.\n");
3729             return CXL_MBOX_INVALID_INPUT;
3730         }
3731     }
3732 }
3733 
3734 static const struct cxl_cmd cxl_cmd_set[256][256] = {
3735     [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
3736         cmd_infostat_bg_op_abort, 0, 0 },
3737     [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
3738         cmd_events_get_records, 1, 0 },
3739     [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
3740         cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE },
3741     [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
3742                                       cmd_events_get_interrupt_policy, 0, 0 },
3743     [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
3744                                       cmd_events_set_interrupt_policy,
3745                                       ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE },
3746     [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
3747         cmd_firmware_update_get_info, 0, 0 },
3748     [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER",
3749         cmd_firmware_update_transfer, ~0,
3750         CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT },
3751     [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE",
3752         cmd_firmware_update_activate, 2,
3753         CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT },
3754     [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
3755     [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
3756                          8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
3757     [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
3758                               0, 0 },
3759     [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
3760     [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED",
3761                                   cmd_features_get_supported, 0x8, 0 },
3762     [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE",
3763                                 cmd_features_get_feature, 0x15, 0 },
3764     [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE",
3765                                 cmd_features_set_feature,
3766                                 ~0,
3767                                 (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
3768                                  CXL_MBOX_IMMEDIATE_DATA_CHANGE |
3769                                  CXL_MBOX_IMMEDIATE_POLICY_CHANGE |
3770                                  CXL_MBOX_IMMEDIATE_LOG_CHANGE |
3771                                  CXL_MBOX_SECURITY_STATE_CHANGE)},
3772     [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
3773         cmd_identify_memory_device, 0, 0 },
3774     [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
3775         cmd_ccls_get_partition_info, 0, 0 },
3776     [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
3777     [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
3778         ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE },
3779     [HEALTH_INFO_ALERTS][GET_ALERT_CONFIG] = {
3780         "HEALTH_INFO_ALERTS_GET_ALERT_CONFIG",
3781         cmd_get_alert_config, 0, 0 },
3782     [HEALTH_INFO_ALERTS][SET_ALERT_CONFIG] = {
3783         "HEALTH_INFO_ALERTS_SET_ALERT_CONFIG",
3784         cmd_set_alert_config, 12, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
3785     [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
3786         (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
3787          CXL_MBOX_SECURITY_STATE_CHANGE |
3788          CXL_MBOX_BACKGROUND_OPERATION |
3789          CXL_MBOX_BACKGROUND_OPERATION_ABORT)},
3790     [SANITIZE][MEDIA_OPERATIONS] = { "MEDIA_OPERATIONS", cmd_media_operations,
3791         ~0,
3792         (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
3793          CXL_MBOX_BACKGROUND_OPERATION)},
3794     [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
3795         cmd_get_security_state, 0, 0 },
3796     [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
3797         cmd_media_get_poison_list, 16, 0 },
3798     [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
3799         cmd_media_inject_poison, 8, 0 },
3800     [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
3801         cmd_media_clear_poison, 72, 0 },
3802     [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = {
3803         "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES",
3804         cmd_media_get_scan_media_capabilities, 16, 0 },
3805     [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA",
3806         cmd_media_scan_media, 17,
3807         (CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT)},
3808     [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = {
3809         "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS",
3810         cmd_media_get_scan_media_results, 0, 0 },
3811 };
3812 
3813 static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
3814     [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG",
3815         cmd_dcd_get_dyn_cap_config, 2, 0 },
3816     [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = {
3817         "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list,
3818         8, 0 },
3819     [DCD_CONFIG][ADD_DYN_CAP_RSP] = {
3820         "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
3821         ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
3822     [DCD_CONFIG][RELEASE_DYN_CAP] = {
3823         "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
3824         ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
3825 };
3826 
3827 static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
3828     [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
3829     [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
3830         cmd_infostat_bg_op_sts, 0, 0 },
3831     [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
3832         cmd_infostat_bg_op_abort, 0, 0 },
3833     [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
3834     [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8,
3835                          CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
3836     [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
3837                               0 },
3838     [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
3839     [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
3840         cmd_identify_switch_device, 0, 0 },
3841     [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
3842         cmd_get_physical_port_state, ~0, 0 },
3843     [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
3844                                      cmd_tunnel_management_cmd, ~0, 0 },
3845 };
3846 
3847 static const struct cxl_cmd cxl_cmd_set_fm_dcd[256][256] = {
3848     [FMAPI_DCD_MGMT][GET_DCD_INFO] = { "GET_DCD_INFO",
3849         cmd_fm_get_dcd_info, 0, 0 },
3850     [FMAPI_DCD_MGMT][GET_HOST_DC_REGION_CONFIG] = { "GET_HOST_DC_REGION_CONFIG",
3851         cmd_fm_get_host_dc_region_config, 4, 0 },
3852     [FMAPI_DCD_MGMT][SET_DC_REGION_CONFIG] = { "SET_DC_REGION_CONFIG",
3853         cmd_fm_set_dc_region_config, 16,
3854         (CXL_MBOX_CONFIG_CHANGE_COLD_RESET |
3855          CXL_MBOX_CONFIG_CHANGE_CONV_RESET |
3856          CXL_MBOX_CONFIG_CHANGE_CXL_RESET |
3857          CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
3858          CXL_MBOX_IMMEDIATE_DATA_CHANGE) },
3859     [FMAPI_DCD_MGMT][GET_DC_REGION_EXTENT_LIST] = { "GET_DC_REGION_EXTENT_LIST",
3860         cmd_fm_get_dc_region_extent_list, 12, 0 },
3861     [FMAPI_DCD_MGMT][INITIATE_DC_ADD] = { "INIT_DC_ADD",
3862         cmd_fm_initiate_dc_add, ~0,
3863         (CXL_MBOX_CONFIG_CHANGE_COLD_RESET |
3864         CXL_MBOX_CONFIG_CHANGE_CONV_RESET |
3865         CXL_MBOX_CONFIG_CHANGE_CXL_RESET |
3866         CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
3867         CXL_MBOX_IMMEDIATE_DATA_CHANGE) },
3868     [FMAPI_DCD_MGMT][INITIATE_DC_RELEASE] = { "INIT_DC_RELEASE",
3869         cmd_fm_initiate_dc_release, ~0,
3870         (CXL_MBOX_CONFIG_CHANGE_COLD_RESET |
3871          CXL_MBOX_CONFIG_CHANGE_CONV_RESET |
3872          CXL_MBOX_CONFIG_CHANGE_CXL_RESET |
3873          CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
3874          CXL_MBOX_IMMEDIATE_DATA_CHANGE) },
3875 };
3876 
3877 /*
3878  * While the command is executing in the background, the device should
3879  * update the percentage complete in the Background Command Status Register
3880  * at least once per second.
3881  */
3882 
3883 #define CXL_MBOX_BG_UPDATE_FREQ 1000UL
3884 
3885 int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
3886                             size_t len_in, uint8_t *pl_in, size_t *len_out,
3887                             uint8_t *pl_out, bool *bg_started)
3888 {
3889     int ret;
3890     const struct cxl_cmd *cxl_cmd;
3891     opcode_handler h;
3892     CXLDeviceState *cxl_dstate;
3893 
3894     *len_out = 0;
3895     cxl_cmd = &cci->cxl_cmd_set[set][cmd];
3896     h = cxl_cmd->handler;
3897     if (!h) {
3898         qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
3899                       set << 8 | cmd);
3900         return CXL_MBOX_UNSUPPORTED;
3901     }
3902 
3903     if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
3904         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
3905     }
3906 
3907     /* Only one bg command at a time */
3908     if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
3909         cci->bg.runtime > 0) {
3910         return CXL_MBOX_BUSY;
3911     }
3912 
3913     /* forbid any selected commands while the media is disabled */
3914     if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
3915         cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
3916 
3917         if (cxl_dev_media_disabled(cxl_dstate)) {
3918             if (h == cmd_events_get_records ||
3919                 h == cmd_ccls_get_partition_info ||
3920                 h == cmd_ccls_set_lsa ||
3921                 h == cmd_ccls_get_lsa ||
3922                 h == cmd_logs_get_log ||
3923                 h == cmd_media_get_poison_list ||
3924                 h == cmd_media_inject_poison ||
3925                 h == cmd_media_clear_poison ||
3926                 h == cmd_sanitize_overwrite ||
3927                 h == cmd_firmware_update_transfer ||
3928                 h == cmd_firmware_update_activate) {
3929                 return CXL_MBOX_MEDIA_DISABLED;
3930             }
3931         }
3932     }
3933 
3934     ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
3935     if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
3936         ret == CXL_MBOX_BG_STARTED) {
3937         *bg_started = true;
3938     } else {
3939         *bg_started = false;
3940     }
3941 
3942     /* Set bg and the return code */
3943     if (*bg_started) {
3944         uint64_t now;
3945 
3946         cci->bg.opcode = (set << 8) | cmd;
3947 
3948         cci->bg.complete_pct = 0;
3949         cci->bg.aborted = false;
3950         cci->bg.ret_code = 0;
3951 
3952         now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
3953         cci->bg.starttime = now;
3954         timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
3955     }
3956 
3957     return ret;
3958 }
3959 
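/*
 * Background operation timer callback: once the emulated runtime has elapsed,
 * complete the pending command and raise the mailbox interrupt; otherwise
 * update the estimated completion percentage and re-arm the timer.
 */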
3960 static void bg_timercb(void *opaque)
3961 {
3962     CXLCCI *cci = opaque;
3963     uint64_t now, total_time;
3964 
3965     qemu_mutex_lock(&cci->bg.lock);
3966 
3967     now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
3968     total_time = cci->bg.starttime + cci->bg.runtime;
3969 
3970     if (now >= total_time) { /* we are done */
3971         uint16_t ret = CXL_MBOX_SUCCESS;
3972 
3973         cci->bg.complete_pct = 100;
3974         cci->bg.ret_code = ret;
3975         switch (cci->bg.opcode) {
3976         case 0x0201: /* fw transfer */
3977             __do_firmware_xfer(cci);
3978             break;
3979         case 0x4400: /* sanitize */
3980         {
3981             CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3982 
3983             __do_sanitization(ct3d);
3984             cxl_dev_enable_media(&ct3d->cxl_dstate);
3985         }
3986         break;
3987         case 0x4402: /* Media Operations sanitize */
3988         {
3989             CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3990             __do_sanitize(ct3d);
3991         }
3992         break;
3993         case 0x4304: /* scan media */
3994         {
3995             CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3996 
3997             __do_scan_media(ct3d);
3998             break;
3999         }
4000         default:
4001             __builtin_unreachable();
4002             break;
4003         }
4004     } else {
4005         /* estimate only */
4006         cci->bg.complete_pct =
4007             100 * (now - cci->bg.starttime) / cci->bg.runtime;
4008         timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
4009     }
4010 
4011     if (cci->bg.complete_pct == 100) {
4012         /* TODO: generalize to switch CCI */
4013         CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
4014         CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
4015         PCIDevice *pdev = PCI_DEVICE(cci->d);
4016 
4017         cci->bg.starttime = 0;
4018         /* registers are updated, allow new bg-capable cmds */
4019         cci->bg.runtime = 0;
4020 
4021         if (msix_enabled(pdev)) {
4022             msix_notify(pdev, cxl_dstate->mbox_msi_n);
4023         } else if (msi_enabled(pdev)) {
4024             msi_notify(pdev, cxl_dstate->mbox_msi_n);
4025         }
4026     }
4027 
4028     qemu_mutex_unlock(&cci->bg.lock);
4029 }
4030 
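/* Rebuild the Command Effects Log from the currently registered commands. */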
4031 static void cxl_rebuild_cel(CXLCCI *cci)
4032 {
4033     cci->cel_size = 0; /* Reset for a fresh build */
4034     for (int set = 0; set < 256; set++) {
4035         for (int cmd = 0; cmd < 256; cmd++) {
4036             if (cci->cxl_cmd_set[set][cmd].handler) {
4037                 const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
4038                 struct cel_log *log =
4039                     &cci->cel_log[cci->cel_size];
4040 
4041                 log->opcode = (set << 8) | cmd;
4042                 log->effect = c->effect;
4043                 cci->cel_size++;
4044             }
4045         }
4046     }
4047 }
4048 
4049 void cxl_init_cci(CXLCCI *cci, size_t payload_max)
4050 {
4051     cci->payload_max = payload_max;
4052     cxl_rebuild_cel(cci);
4053 
4054     cci->bg.complete_pct = 0;
4055     cci->bg.starttime = 0;
4056     cci->bg.runtime = 0;
4057     cci->bg.aborted = false;
4058     cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
4059                                  bg_timercb, cci);
4060     qemu_mutex_init(&cci->bg.lock);
4061 
4062     memset(&cci->fw, 0, sizeof(cci->fw));
4063     cci->fw.active_slot = 1;
4064     cci->fw.slot[cci->fw.active_slot - 1] = true;
4065     cci->initialized = true;
4066 }
4067 
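/* Tear down state created by cxl_init_cci(). */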
4068 void cxl_destroy_cci(CXLCCI *cci)
4069 {
4070     qemu_mutex_destroy(&cci->bg.lock);
4071     cci->initialized = false;
4072 }
4073 
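/*
 * Copy every populated entry of the given 256x256 command table into the
 * CCI, leaving slots that the source table does not define untouched.
 */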
4074 static void cxl_copy_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmds)[256])
4075 {
4076     for (int set = 0; set < 256; set++) {
4077         for (int cmd = 0; cmd < 256; cmd++) {
4078             if (cxl_cmds[set][cmd].handler) {
4079                 cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd];
4080             }
4081         }
4082     }
4083 }
4084 
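/*
 * Merge an additional command set into an already initialized CCI, growing
 * the maximum payload size if necessary and regenerating the CEL.
 */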
4085 void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
4086                                  size_t payload_max)
4087 {
4088     cci->payload_max = MAX(payload_max, cci->payload_max);
4089     cxl_copy_cci_commands(cci, cxl_cmd_set);
4090     cxl_rebuild_cel(cci);
4091 }
4092 
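/*
 * Initialize a switch CCI mailbox backed by cxl_cmd_set_sw; intf is the
 * device exposing the mailbox, d the device being managed.
 */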
4093 void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
4094                                   DeviceState *d, size_t payload_max)
4095 {
4096     cxl_copy_cci_commands(cci, cxl_cmd_set_sw);
4097     cci->d = d;
4098     cci->intf = intf;
4099     cxl_init_cci(cci, payload_max);
4100 }
4101 
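/*
 * Initialize the primary mailbox of a Type 3 device. DCD commands are only
 * registered when dynamic capacity regions have been configured.
 */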
4102 void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
4103 {
4104     CXLType3Dev *ct3d = CXL_TYPE3(d);
4105 
4106     cxl_copy_cci_commands(cci, cxl_cmd_set);
4107     if (ct3d->dc.num_regions) {
4108         cxl_copy_cci_commands(cci, cxl_cmd_set_dcd);
4109     }
4110     cci->d = d;
4111 
4112     /* No separation for PCI MB as protocol handled in PCI device */
4113     cci->intf = d;
4114     cxl_init_cci(cci, payload_max);
4115 }
4116 
4117 static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
4118     [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
4119     [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
4120                               0 },
4121     [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
4122 };
4123 
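/*
 * Initialize the CCI for a Type 3 logical device (LD), exposing only the
 * minimal identify/log commands from cxl_cmd_set_t3_ld.
 */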
4124 void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
4125                                size_t payload_max)
4126 {
4127     cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld);
4128     cci->d = d;
4129     cci->intf = intf;
4130     cxl_init_cci(cci, payload_max);
4131 }
4132 
4133 static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
4134     [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0,  0},
4135     [INFOSTAT][GET_RESPONSE_MSG_LIMIT] = { "GET_RESPONSE_MSG_LIMIT",
4136                                            cmd_get_response_msg_limit, 0, 0 },
4137     [INFOSTAT][SET_RESPONSE_MSG_LIMIT] = { "SET_RESPONSE_MSG_LIMIT",
4138                                            cmd_set_response_msg_limit, 1, 0 },
4139     [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
4140                               0 },
4141     [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
4142     [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
4143     [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
4144                                      cmd_tunnel_management_cmd, ~0, 0 },
4145 };
4146 
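/*
 * Initialize the MCTP-based CCI for an FM-owned LD on a Type 3 device,
 * adding the FM DCD commands when dynamic capacity regions exist.
 */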
4147 void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
4148                                            DeviceState *intf,
4149                                            size_t payload_max)
4150 {
4151     CXLType3Dev *ct3d = CXL_TYPE3(d);
4152 
4153     cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp);
4154     if (ct3d->dc.num_regions) {
4155         cxl_copy_cci_commands(cci, cxl_cmd_set_fm_dcd);
4156     }
4157     cci->d = d;
4158     cci->intf = intf;
4159     cxl_init_cci(cci, payload_max);
4160 }
4161
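/*
 * Illustrative sketch (not part of this file): how an additional command
 * table could be merged into an existing CCI. The set value (0xC0), the
 * handler cmd_vendor_passthrough() and the table name are hypothetical and
 * exist only for this example; real command sets are registered exactly as
 * the tables above are, via cxl_add_cci_commands().
 *
 *   static const struct cxl_cmd cxl_cmd_set_vendor[256][256] = {
 *       [0xC0][0x00] = { "VENDOR_PASSTHROUGH", cmd_vendor_passthrough,
 *                        ~0, 0 },
 *   };
 *
 *   void register_vendor_cmds(CXLCCI *cci)
 *   {
 *       cxl_add_cci_commands(cci, cxl_cmd_set_vendor, 4096);
 *   }
 */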