/*
 * CXL Event processing
 *
 * Copyright(C) 2023 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"

/* Artificial limit on the number of events a log can hold */
#define CXL_TEST_EVENT_OVERFLOW 8

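/* Forget any recorded overflow state for this event log */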
static void reset_overflow(CXLEventLog *log)
{
    log->overflow_err_count = 0;
    log->first_overflow_timestamp = 0;
    log->last_overflow_timestamp = 0;
}

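/*
 * Initialize every event log: record handles start at 1, interrupts are
 * disabled, and each log is assigned the next message number in turn.
 * The Dynamic Capacity log shares its vector with the Informational log.
 */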
void cxl_event_init(CXLDeviceState *cxlds, int start_msg_num)
{
    CXLEventLog *log;
    int i;

    for (i = 0; i < CXL_EVENT_TYPE_MAX; i++) {
        log = &cxlds->event_logs[i];
        log->next_handle = 1;
        log->overflow_err_count = 0;
        log->first_overflow_timestamp = 0;
        log->last_overflow_timestamp = 0;
        log->irq_enabled = false;
        log->irq_vec = start_msg_num++;
        qemu_mutex_init(&log->lock);
        QSIMPLEQ_INIT(&log->events);
    }

    /* Override -- Dynamic Capacity uses the same vector as info */
    cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP].irq_vec =
        cxlds->event_logs[CXL_EVENT_TYPE_INFO].irq_vec;
}

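/* Queue accessors: records are kept in insertion order, oldest first */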
static CXLEvent *cxl_event_get_head(CXLEventLog *log)
{
    return QSIMPLEQ_FIRST(&log->events);
}

static CXLEvent *cxl_event_get_next(CXLEvent *entry)
{
    return QSIMPLEQ_NEXT(entry, node);
}

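/* Walk the queue; logs are capped at CXL_TEST_EVENT_OVERFLOW entries */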
static int cxl_event_count(CXLEventLog *log)
{
    CXLEvent *event;
    int rc = 0;

    QSIMPLEQ_FOREACH(event, &log->events, node) {
        rc++;
    }

    return rc;
}

static bool cxl_event_empty(CXLEventLog *log)
{
    return QSIMPLEQ_EMPTY(&log->events);
}

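/*
 * Remove the oldest record from a log. Consuming a record clears the
 * overflow bookkeeping, and the log's status is cleared once the last
 * record is gone.
 */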
static void cxl_event_delete_head(CXLDeviceState *cxlds,
                                  CXLEventLogType log_type,
                                  CXLEventLog *log)
{
    CXLEvent *entry = cxl_event_get_head(log);

    reset_overflow(log);
    QSIMPLEQ_REMOVE_HEAD(&log->events, node);
    if (cxl_event_empty(log)) {
        cxl_event_set_status(cxlds, log_type, false);
    }
    g_free(entry);
}

/*
 * Return true if an interrupt should be generated as a result
 * of inserting this event.
 */
bool cxl_event_insert(CXLDeviceState *cxlds, CXLEventLogType log_type,
                      CXLEventRecordRaw *event)
{
    uint64_t time;
    CXLEventLog *log;
    CXLEvent *entry;

    if (log_type >= CXL_EVENT_TYPE_MAX) {
        return false;
    }

    time = cxl_device_get_timestamp(cxlds);

    log = &cxlds->event_logs[log_type];

    QEMU_LOCK_GUARD(&log->lock);

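    /* Log is full: record overflow statistics and drop the new event */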
    if (cxl_event_count(log) >= CXL_TEST_EVENT_OVERFLOW) {
        if (log->overflow_err_count == 0) {
            log->first_overflow_timestamp = time;
        }
        log->overflow_err_count++;
        log->last_overflow_timestamp = time;
        return false;
    }

    entry = g_new0(CXLEvent, 1);

    memcpy(&entry->data, event, sizeof(*event));

    entry->data.hdr.handle = cpu_to_le16(log->next_handle);
    log->next_handle++;
    /* 0 handle is never valid */
    if (log->next_handle == 0) {
        log->next_handle++;
    }
    entry->data.hdr.timestamp = cpu_to_le64(time);

    QSIMPLEQ_INSERT_TAIL(&log->events, entry, node);
    cxl_event_set_status(cxlds, log_type, true);

    /* Count went from 0 to 1 */
    return cxl_event_count(log) == 1;
}

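/* Drop every queued record from every event log */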
void cxl_discard_all_event_records(CXLDeviceState *cxlds)
{
    CXLEventLogType log_type;
    CXLEventLog *log;

    for (log_type = 0; log_type < CXL_EVENT_TYPE_MAX; log_type++) {
        log = &cxlds->event_logs[log_type];
        while (!cxl_event_empty(log)) {
            cxl_event_delete_head(cxlds, log_type, log);
        }
    }
}

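/*
 * Copy up to max_recs records from the head of the given log into the
 * Get Event Records payload. Records are not dequeued here; they remain
 * until explicitly cleared. Overflow information is reported alongside
 * the records if any events were dropped.
 */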
CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl,
                                 uint8_t log_type, int max_recs,
                                 size_t *len)
{
    CXLEventLog *log;
    CXLEvent *entry;
    uint16_t nr;

    if (log_type >= CXL_EVENT_TYPE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log = &cxlds->event_logs[log_type];

    QEMU_LOCK_GUARD(&log->lock);

    entry = cxl_event_get_head(log);
    for (nr = 0; entry && nr < max_recs; nr++) {
        memcpy(&pl->records[nr], &entry->data, CXL_EVENT_RECORD_SIZE);
        entry = cxl_event_get_next(entry);
    }

    if (!cxl_event_empty(log)) {
        pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
    }

    if (log->overflow_err_count) {
        pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
        pl->overflow_err_count = cpu_to_le16(log->overflow_err_count);
        pl->first_overflow_timestamp =
            cpu_to_le64(log->first_overflow_timestamp);
        pl->last_overflow_timestamp =
            cpu_to_le64(log->last_overflow_timestamp);
    }

    pl->record_count = cpu_to_le16(nr);
    *len = CXL_EVENT_PAYLOAD_HDR_SIZE + (CXL_EVENT_RECORD_SIZE * nr);

    return CXL_MBOX_SUCCESS;
}

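/*
 * Clear the records named in the Clear Event Records payload. The handles
 * must match the log's records from the head onwards and in order;
 * otherwise nothing is cleared.
 */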
CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds,
                                   CXLClearEventPayload *pl)
{
    CXLEventLog *log;
    uint8_t log_type;
    CXLEvent *entry;
    int nr;

    log_type = pl->event_log;

    if (log_type >= CXL_EVENT_TYPE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log = &cxlds->event_logs[log_type];

    QEMU_LOCK_GUARD(&log->lock);
    /*
     * Must iterate the queue twice.
     * "The device shall verify the event record handles specified in the input
     * payload are in temporal order. If the device detects an older event
     * record that will not be cleared when Clear Event Records is executed,
     * the device shall return the Invalid Handle return code and shall not
     * clear any of the specified event records."
     * -- CXL r3.1 Section 8.2.9.2.3: Clear Event Records (0101h)
     */
    entry = cxl_event_get_head(log);
    for (nr = 0; entry && nr < pl->nr_recs; nr++) {
        uint16_t handle = pl->handle[nr];

        /* NOTE: Both handles are little endian. */
        if (handle == 0 || entry->data.hdr.handle != handle) {
            return CXL_MBOX_INVALID_INPUT;
        }
        entry = cxl_event_get_next(entry);
    }

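    /* All requested handles matched; drop that many records from the head */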
    entry = cxl_event_get_head(log);
    for (nr = 0; entry && nr < pl->nr_recs; nr++) {
        cxl_event_delete_head(cxlds, log_type, log);
        entry = cxl_event_get_head(log);
    }

    return CXL_MBOX_SUCCESS;
}

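/*
 * For each log that has interrupts enabled and records pending, notify the
 * host on the log's MSI-X (or MSI) vector.
 */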
void cxl_event_irq_assert(CXLType3Dev *ct3d)
{
    CXLDeviceState *cxlds = &ct3d->cxl_dstate;
    PCIDevice *pdev = &ct3d->parent_obj;
    int i;

    for (i = 0; i < CXL_EVENT_TYPE_MAX; i++) {
        CXLEventLog *log = &cxlds->event_logs[i];

        if (!log->irq_enabled || cxl_event_empty(log)) {
            continue;
        }

        /* Notify the interrupt; legacy IRQ is not supported */
        if (msix_enabled(pdev)) {
            msix_notify(pdev, log->irq_vec);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, log->irq_vec);
        }
    }
}

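/*
 * Build one Dynamic Capacity event record per extent and queue it on the
 * Dynamic Capacity log. All but the last record carry the "More" flag so
 * the host knows further extents follow. An interrupt is asserted whenever
 * an insert transitions the log from empty to non-empty.
 */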
void cxl_create_dc_event_records_for_extents(CXLType3Dev *ct3d,
                                             CXLDCEventType type,
                                             CXLDCExtentRaw extents[],
                                             uint32_t ext_count)
{
    CXLEventDynamicCapacity event_rec = {};
    int i;

    cxl_assign_event_header(&event_rec.hdr,
                            &dynamic_capacity_uuid,
                            (1 << CXL_EVENT_TYPE_INFO),
                            sizeof(event_rec),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    event_rec.type = type;
    event_rec.validity_flags = 1;
    event_rec.host_id = 0;
    event_rec.updated_region_id = 0;
    event_rec.extents_avail = CXL_NUM_EXTENTS_SUPPORTED -
                              ct3d->dc.total_extent_count;

    for (i = 0; i < ext_count; i++) {
        memcpy(&event_rec.dynamic_capacity_extent,
               &extents[i],
               sizeof(CXLDCExtentRaw));
        event_rec.flags = 0;
        if (i < ext_count - 1) {
            /* Set "More" flag */
            event_rec.flags |= BIT(0);
        }

        if (cxl_event_insert(&ct3d->cxl_dstate,
                             CXL_EVENT_TYPE_DYNAMIC_CAP,
                             (CXLEventRecordRaw *)&event_rec)) {
            cxl_event_irq_assert(ct3d);
        }
    }
}