/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"

/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   registers defined in this section. Unless otherwise specified, software
 *   shall restrict the access width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
 *     Bytes quantity.
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be a
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */

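/*
 * The capability header array is mirrored as 32-bit and 64-bit views of the
 * same backing store: serve 4-byte reads from the 32-bit view, and everything
 * else from the 64-bit view (the impl constraints on caps_ops limit accesses
 * reaching this callback to 4 or 8 bytes).
 */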
static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    if (size == 4) {
        return cxl_dstate->caps_reg_state32[offset / sizeof(*cxl_dstate->caps_reg_state32)];
    } else {
        return cxl_dstate->caps_reg_state64[offset / sizeof(*cxl_dstate->caps_reg_state64)];
    }
}

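/* Device status registers are not modelled yet, so all reads return zero. */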
static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    return 0;
}

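/*
 * Mailbox registers live in a single byte array with overlaid 16/32/64-bit
 * views; pick the view matching the access size so the read indexes the
 * backing store at its natural alignment.
 */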
static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

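/*
 * 32-bit mailbox register writes: the control register is the only writable
 * one; the capability register is read-only, and writes to any other offset
 * are logged as unimplemented and ignored (WI = write ignored).
 */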
static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CTRL:
        break;
    case A_CXL_DEV_MAILBOX_CAP:
        /* RO register */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

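/*
 * 64-bit mailbox register writes: only the command register may be written
 * by the guest. The status registers are owned by the mailbox state machine,
 * and background commands are unsupported, so those writes are dropped.
 */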
static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CMD:
        break;
    case A_CXL_DEV_BG_CMD_STS:
        /* BG not supported */
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_STS:
        /* Read only register, will get updated by the state machine */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

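/*
 * Top-level mailbox write dispatch: writes into the command payload area are
 * copied straight into the backing store, while writes below the payload are
 * routed by access size. After a register write, a set doorbell bit kicks the
 * mailbox state machine to process the queued command.
 */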
static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        cxl_process_mailbox(cxl_dstate);
    }
}

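/*
 * The memory device status register is synthesized on each read rather than
 * backed by storage: the media and the mailbox interface are always reported
 * as ready.
 */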
static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    uint64_t retval = 0;

    retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
    retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MBOX_READY, 1);

    return retval;
}

static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}

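/*
 * For each register block, .valid describes what the guest may issue
 * (1-8 byte, naturally aligned accesses) while .impl describes what the
 * callbacks actually implement; QEMU's memory core splits or widens accesses
 * to bridge the two, e.g. narrow reads of the memory device status register
 * are serviced as 8-byte calls into mdev_reg_read.
 */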
static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

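/*
 * Build the composite MMIO region for the device register block: the
 * capability header array sits at offset 0, followed by the device status,
 * mailbox, and memory device register blocks at their fixed offsets.
 */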
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cxl_dstate,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}

static void device_reg_init_common(CXLDeviceState *cxl_dstate) { }

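/*
 * The CAP register's PAYLOAD_SIZE field holds log2 of the payload size in
 * bytes, so programming it with CXL_MAILBOX_PAYLOAD_SHIFT advertises a
 * 2^shift byte payload area.
 */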
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    /* 2048 byte payload size, with no interrupt or background support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
}

static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { }

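/*
 * Fill in the capability array header, then the header for each of the three
 * advertised capabilities (device status, mailbox, memory device) along with
 * its register defaults, and finally bring up the mailbox state machine.
 */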
void cxl_device_register_init_common(CXLDeviceState *cxl_dstate)
{
    uint64_t *cap_hdrs = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox(cxl_dstate);
}

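/*
 * Report the device timestamp as the host-set base plus the virtual-clock
 * time elapsed since the host last set it; if the host has never set a
 * timestamp, return 0.
 */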
uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
{
    uint64_t time, delta;
    uint64_t final_time = 0;

    if (cxl_dstate->timestamp.set) {
        /* Find the delta from the last time the host set the time. */
        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        delta = time - cxl_dstate->timestamp.last_set;
        final_time = cxl_dstate->timestamp.host_set + delta;
    }

    return final_time;
}