xref: /openbmc/qemu/hw/cxl/cxl-device-utils.c (revision fc58891d)
1 /*
2  * CXL Utility library for devices
3  *
4  * Copyright(C) 2020 Intel Corporation.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "hw/cxl/cxl.h"
13 
14 /*
15  * Device registers have no restrictions per the spec, and so fall back to the
16  * default memory mapped register rules in 8.2:
17  *   Software shall use CXL.io Memory Read and Write to access memory mapped
18  *   register defined in this section. Unless otherwise specified, software
19  *   shall restrict the accesses width based on the following:
20  *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
21  *     quantity.
22  *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
23  *     Bytes
24  *   • The address shall be a multiple of the access width, e.g. when
25  *     accessing a register as a 4 Byte quantity, the address shall be
26  *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
29  */
30 
31 static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
32 {
33     CXLDeviceState *cxl_dstate = opaque;
34 
35     switch (size) {
36     case 4:
37         return cxl_dstate->caps_reg_state32[offset / size];
38     case 8:
39         return cxl_dstate->caps_reg_state64[offset / size];
40     default:
41         g_assert_not_reached();
42     }
43 }
44 
45 static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
46 {
47     CXLDeviceState *cxl_dstate = opaque;
48 
49     switch (size) {
50     case 1:
51         return cxl_dstate->dev_reg_state[offset];
52     case 2:
53         return cxl_dstate->dev_reg_state16[offset / size];
54     case 4:
55         return cxl_dstate->dev_reg_state32[offset / size];
56     case 8:
57         return cxl_dstate->dev_reg_state64[offset / size];
58     default:
59         g_assert_not_reached();
60     }
61 }
62 
/*
 * MMIO read handler for the mailbox register block.
 *
 * The CCI may front either a CXL type 3 memory device or a switch
 * mailbox CCI; any other interface type reads as zero.  1/2/4-byte
 * reads come straight from the backing store; 8-byte reads of the
 * background-command and mailbox status registers are refreshed from
 * the CCI's live state before being returned.
 */
static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    /* Resolve the device state that owns this mailbox. */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        /* Unknown interface type: reads as zero. */
        return 0;
    }

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
        if (offset == A_CXL_DEV_BG_CMD_STS) {
            /*
             * Synthesize the background command status register from
             * the CCI's current background-operation state.
             */
            uint64_t bg_status_reg;
            bg_status_reg = FIELD_DP64(0, CXL_DEV_BG_CMD_STS, OP,
                                       cci->bg.opcode);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       PERCENTAGE_COMP, cci->bg.complete_pct);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       RET_CODE, cci->bg.ret_code);
            /* endian? -- NOTE(review): stored in host order; confirm LE */
            cxl_dstate->mbox_reg_state64[offset / size] = bg_status_reg;
        }
        if (offset == A_CXL_DEV_MAILBOX_STS) {
            uint64_t status_reg = cxl_dstate->mbox_reg_state64[offset / size];
            /*
             * Once a background command has reported progress, clear
             * the BG_OP indication in the mailbox status register.
             */
            if (cci->bg.complete_pct) {
                status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP,
                                        0);
                cxl_dstate->mbox_reg_state64[offset / size] = status_reg;
            }
        }
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}
109 
110 static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
111                                uint64_t value)
112 {
113     switch (offset) {
114     case A_CXL_DEV_MAILBOX_CTRL:
115         /* fallthrough */
116     case A_CXL_DEV_MAILBOX_CAP:
117         /* RO register */
118         break;
119     default:
120         qemu_log_mask(LOG_UNIMP,
121                       "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
122                       __func__, offset);
123         return;
124     }
125 
126     reg_state[offset / sizeof(*reg_state)] = value;
127 }
128 
129 static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
130                                uint64_t value)
131 {
132     switch (offset) {
133     case A_CXL_DEV_MAILBOX_CMD:
134         break;
135     case A_CXL_DEV_BG_CMD_STS:
136         break;
137     case A_CXL_DEV_MAILBOX_STS:
138         /* Read only register, will get updated by the state machine */
139         return;
140     default:
141         qemu_log_mask(LOG_UNIMP,
142                       "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
143                       __func__, offset);
144         return;
145     }
146 
147 
148     reg_state[offset / sizeof(*reg_state)] = value;
149 }
150 
/*
 * MMIO write handler for the mailbox register block.
 *
 * Writes to the payload area are stored directly.  Register writes go
 * through the width-specific helpers; if the write set the doorbell,
 * the pending command is decoded from the command register, dispatched
 * via cxl_process_cci_message(), and the command/status registers are
 * updated with the result before the doorbell is cleared.
 */
static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    /* Resolve the device state that owns this mailbox. */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        /* Unknown interface type: write ignored. */
        return;
    }

    /* Payload area writes bypass the register write helpers. */
    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    /* Doorbell set: decode and run the command now pending. */
    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        uint64_t command_reg =
            cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];
        uint8_t cmd_set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD,
                                     COMMAND_SET);
        uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
        size_t len_in = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
        uint8_t *pl = cxl_dstate->mbox_reg_state + A_CXL_DEV_CMD_PAYLOAD;
        /*
         * Copy taken to avoid need for individual command handlers to care
         * about aliasing.
         */
        g_autofree uint8_t *pl_in_copy = NULL;
        size_t len_out = 0;
        uint64_t status_reg;
        bool bg_started = false;
        int rc;

        /* g_memdup2 returns NULL on allocation failure (or len_in == 0). */
        pl_in_copy = g_memdup2(pl, len_in);
        if (len_in == 0 || pl_in_copy) {
            /* Avoid stale data  - including from earlier cmds */
            memset(pl, 0, CXL_MAILBOX_MAX_PAYLOAD_SIZE);
            rc = cxl_process_cci_message(cci, cmd_set, cmd, len_in, pl_in_copy,
                                         &len_out, pl, &bg_started);
        } else {
            rc = CXL_MBOX_INTERNAL_ERROR;
        }

        /* Set bg and the return code */
        status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, BG_OP,
                                bg_started ? 1 : 0);
        status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, ERRNO, rc);
        /* Set the return length */
        command_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_CMD, COMMAND_SET, cmd_set);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 COMMAND, cmd);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 LENGTH, len_out);

        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;
        /* Tell the host we're done */
        ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL, 0);
    }
}
229 
230 static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
231 {
232     uint64_t retval = 0;
233 
234     retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
235     retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MBOX_READY, 1);
236 
237     return retval;
238 }
239 
/*
 * Shared no-op write handler for read-only register regions
 * (capability array, device status, memory device status).
 */
static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    /* Many register sets are read only */
}
245 
/* Memory device status registers: read-only, implemented 8 bytes wide. */
static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        /* Guest accesses of 1-4 bytes are split/merged from 8-byte reads. */
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
260 
/* Mailbox registers: read/write at any width from 1 to 8 bytes. */
static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
275 
/* Device status registers: read-only, any access width from 1 to 8 bytes. */
static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
290 
/* Capability array registers: read-only, implemented as 4/8-byte accesses. */
static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        /* caps_reg_read only handles 4- and 8-byte accesses. */
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
305 
/*
 * Build the device register block: a container memory region holding
 * the capability array, device status, mailbox, and memory device
 * status subregions at their architected offsets.  @cci is the opaque
 * passed to the mailbox handlers; the other regions use @cxl_dstate.
 */
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate,
                                    CXLCCI *cci)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cci,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    /* Capability array sits at offset 0 of the block. */
    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}
335 
336 void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
337                           bool available)
338 {
339     if (available) {
340         cxl_dstate->event_status |= (1 << log_type);
341     } else {
342         cxl_dstate->event_status &= ~(1 << log_type);
343     }
344 
345     ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
346                      EVENT_STATUS, cxl_dstate->event_status);
347 }
348 
349 static void device_reg_init_common(CXLDeviceState *cxl_dstate)
350 {
351     CXLEventLogType log;
352 
353     for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
354         cxl_event_set_status(cxl_dstate, log, false);
355     }
356 }
357 
/*
 * Initialize the mailbox capability register: advertise the payload
 * size, background-command interrupt support, and the MSI vector used
 * for mailbox interrupts.
 */
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    /* MSI vector number advertised for mailbox interrupts. */
    const uint8_t msi_n = 9;

    /* 2048 payload size */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
    /* irq support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     BG_INT_CAP, 1);
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MSI_N, msi_n);
    cxl_dstate->mbox_msi_n = msi_n;
}
373 
374 static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { }
375 
/*
 * Populate the register state for a type 3 memory device: the
 * capability array header, one capability entry per register block
 * (device status, mailbox, memory device), and the mailbox CCI.
 */
void cxl_device_register_init_t3(CXLType3Dev *ct3d)
{
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    /* Must match the number of cxl_device_cap_init() calls below. */
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    /* 0x4000 is the memory device capability ID. */
    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
                              CXL_MAILBOX_MAX_PAYLOAD_SIZE);
}
399 
/*
 * Populate the register state for a switch mailbox CCI device.  Mirrors
 * cxl_device_register_init_t3() except that the caller is responsible
 * for initializing the CCI itself.
 */
void cxl_device_register_init_swcci(CSWMBCCIDev *sw)
{
    CXLDeviceState *cxl_dstate = &sw->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    /* Must match the number of cxl_device_cap_init() calls below. */
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    /* 0x4000 is the memory device capability ID. */
    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);
}
420 
421 uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
422 {
423     uint64_t time, delta;
424     uint64_t final_time = 0;
425 
426     if (cxl_dstate->timestamp.set) {
427         /* Find the delta from the last time the host set the time. */
428         time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
429         delta = time - cxl_dstate->timestamp.last_set;
430         final_time = cxl_dstate->timestamp.host_set + delta;
431     }
432 
433     return final_time;
434 }
435