xref: /openbmc/qemu/hw/intc/xive2.c (revision 95d729e2bc5b46d40e71971043e03d9cc9503e9a)
1 /*
2  * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
3  *
 * Copyright (c) 2019-2022, IBM Corporation.
5  *
6  * This code is licensed under the GPL version 2 or later. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qemu/module.h"
13 #include "qapi/error.h"
14 #include "target/ppc/cpu.h"
15 #include "sysemu/cpus.h"
16 #include "sysemu/dma.h"
17 #include "hw/qdev-properties.h"
18 #include "monitor/monitor.h"
19 #include "hw/ppc/xive.h"
20 #include "hw/ppc/xive2.h"
21 #include "hw/ppc/xive2_regs.h"
22 
23 void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, Monitor *mon)
24 {
25     if (!xive2_eas_is_valid(eas)) {
26         return;
27     }
28 
29     monitor_printf(mon, "  %08x %s end:%02x/%04x data:%08x\n",
30                    lisn, xive2_eas_is_masked(eas) ? "M" : " ",
31                    (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
32                    (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
33                    (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
34 }
35 
/*
 * Dump a window of the END event queue on the monitor: the 'width' - 1
 * entries preceding the current queue index plus the next slot to be
 * written, which is marked with '^'.
 */
void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width,
                                    Monitor *mon)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);  /* queue holds 2^(qsize + 10) entries */
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        /* EQ entries are 4-byte words */
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        /* EQ entries are stored big-endian in guest memory */
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    monitor_printf(mon, "]");
}
66 
/*
 * Dump the state of one END on the monitor: ESn PQ bits, control
 * flags, priority, target NVP and, when an event queue is configured,
 * the queue pointers plus a window of the last entries.
 */
void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qentries = 1 << (qsize + 10);  /* queue holds 2^(qsize + 10) entries */

    uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
    uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
    uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    /* Invalid ENDs are not reported */
    if (!xive2_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESn, end->w1);  /* notification ESB state */

    monitor_printf(mon,
                   "  %08x %c%c %c%c%c%c%c%c%c%c%c%c prio:%d nvp:%02x/%04x",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive2_end_is_valid(end)    ? 'v' : '-',
                   xive2_end_is_enqueue(end)  ? 'q' : '-',
                   xive2_end_is_notify(end)   ? 'n' : '-',
                   xive2_end_is_backlog(end)  ? 'b' : '-',
                   xive2_end_is_escalate(end) ? 'e' : '-',
                   xive2_end_is_escalate_end(end) ? 'N' : '-',
                   xive2_end_is_uncond_escalation(end)   ? 'u' : '-',
                   xive2_end_is_silent_escalation(end)   ? 's' : '-',
                   xive2_end_is_firmware1(end)   ? 'f' : '-',
                   xive2_end_is_firmware2(end)   ? 'F' : '-',
                   priority, nvp_blk, nvp_idx);

    /* Only dump queue details when an EQ base address is configured */
    if (qaddr_base) {
        monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                       qaddr_base, qindex, qentries, qgen);
        xive2_end_queue_pic_print_info(end, 6, mon);
    }
    monitor_printf(mon, "\n");
}
110 
/*
 * Dump the escalation EAS embedded in END words 4-5 together with the
 * ESe (escalation ESB) PQ bits. ENDs without escalation are skipped.
 */
void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
                                  Monitor *mon)
{
    /* The escalation data in w4-5 is laid out like a regular EAS */
    Xive2Eas *eas = (Xive2Eas *) &end->w4;
    uint8_t pq;

    if (!xive2_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESe, end->w1);

    monitor_printf(mon, "  %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive2_eas_is_valid(eas) ? 'v' : ' ',
                   xive2_eas_is_masked(eas) ? 'M' : ' ',
                   (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}
133 
/*
 * Enqueue an event data word in the event queue of the END. The top
 * bit of each entry carries the current generation bit so a consumer
 * can detect a queue wrap. The queue index (and generation flags on
 * wrap) are updated in the in-memory END structure; the caller is
 * responsible for writing the END back through the router.
 */
static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);

    /* EQ entries are 4-byte, big-endian words */
    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);  /* queue holds 2^(qsize + 10) entries */

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
                         MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        /* Queue wrapped: flip the generation bit */
        qgen ^= 1;
        end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);

        /* TODO(PowerNV): reset GF bit on a cache watch operation */
        end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}
162 
163 /*
164  * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
165  */
166 
167 static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk,
168                                 uint32_t *nvp_idx, bool *vo)
169 {
170     *nvp_blk = xive2_nvp_blk(cam);
171     *nvp_idx = xive2_nvp_idx(cam);
172     *vo = !!(cam & TM2_QW1W2_VO);
173 }
174 
/*
 * TIMA "pull OS context" special load: invalidate the OS CAM line of
 * the thread context (clear the VO bit) and return its previous value
 * to the caller.
 */
uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset, unsigned size)
{
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw1w2_new;
    uint32_t cam = be32_to_cpu(qw1w2);  /* TIMA registers are big-endian */
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;

    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo);

    /* Pulling an invalid context is suspicious but otherwise harmless */
    if (!vo) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
                      nvp_blk, nvp_idx);
    }

    /* Invalidate CAM line */
    qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4);

    /* Returns the word in TIMA (big-endian) byte order */
    return qw1w2;
}
198 
/*
 * Look for interrupts that were left pending in the NVP while its vCPU
 * was not dispatched: merge the NVP IPB into the OS thread context,
 * which can trigger a resend of the interrupt.
 */
static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx)
{
    Xive2Nvp nvp;
    uint8_t ipb;
    uint8_t cppr = 0;  /* NOTE(review): CPPR restore is not modeled here */

    /*
     * Grab the associated thread interrupt context registers in the
     * associated NVP
     */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    /* Transfer the pending bits: clear the NVP IPB once consumed */
    ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
    if (ipb) {
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
    }

    /* An IPB or CPPR change can trigger a resend */
    if (ipb || cppr) {
        xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
    }
}
233 
/*
 * Updating the OS CAM line can trigger a resend of an interrupt
 */
/*
 * TIMA "push OS context" store: install a new OS CAM line in the
 * thread context and, when the context is valid, check the NVP for
 * interrupts left pending while the vCPU was not dispatched.
 */
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam = value;
    uint32_t qw1w2 = cpu_to_be32(cam);  /* TIMA registers are big-endian */
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;

    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo);

    /* First update the thread context */
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);

    /* Check the interrupt pending bits */
    if (vo) {
        xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx);
    }
}
256 
257 /*
258  * XIVE Router (aka. Virtualization Controller or IVRE)
259  */
260 
261 int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
262                          Xive2Eas *eas)
263 {
264     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
265 
266     return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
267 }
268 
269 static
270 int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
271                        uint8_t *pq)
272 {
273     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
274 
275     return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
276 }
277 
278 static
279 int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
280                        uint8_t *pq)
281 {
282     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
283 
284     return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
285 }
286 
287 int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
288                          Xive2End *end)
289 {
290    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
291 
292    return xrc->get_end(xrtr, end_blk, end_idx, end);
293 }
294 
295 int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
296                            Xive2End *end, uint8_t word_number)
297 {
298    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
299 
300    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
301 }
302 
303 int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
304                          Xive2Nvp *nvp)
305 {
306    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
307 
308    return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp);
309 }
310 
311 int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
312                            Xive2Nvp *nvp, uint8_t word_number)
313 {
314    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
315 
316    return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
317 }
318 
319 static int xive2_router_get_block_id(Xive2Router *xrtr)
320 {
321    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
322 
323    return xrc->get_block_id(xrtr);
324 }
325 
326 /*
327  * Encode the HW CAM line with 7bit or 8bit thread id. The thread id
328  * width and block id width is configurable at the IC level.
329  *
330  *    chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
331  *    chipid << 24 | 0000 0000 0000 0001 threadid   (8Bit)
332  */
static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;  /* Processor ID Register */
    uint8_t blk = xive2_router_get_block_id(xrtr);
    uint8_t tid_shift = 7;  /* NOTE(review): fixed 7-bit thread id; the width
                             * should be configurable (see comment above) */
    uint8_t tid_mask = (1 << tid_shift) - 1;

    /* Set the marker bit just above the thread id, as described above */
    return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
}
344 
345 /*
346  * The thread context register words are in big-endian format.
347  */
/*
 * Match an NVT identifier against the enabled rings of a thread
 * context. Returns the matched ring offset (TM_QW3_HV_PHYS,
 * TM_QW2_HV_POOL, TM_QW1_OS or TM_QW0_USER) or -1 when nothing
 * matches.
 */
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                               uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam =   xive2_nvp_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
            cam == xive2_tctx_hw_cam_line(xptr, tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
            cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring: requires a valid OS CAM match plus the user bit
         * and a matching logical server id */
        if  ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
             (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
             (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) &&
             (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}
407 
408 static void xive2_router_realize(DeviceState *dev, Error **errp)
409 {
410     Xive2Router *xrtr = XIVE2_ROUTER(dev);
411 
412     assert(xrtr->xfb);
413 }
414 
/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provides further coalescing in the
 * Router.
 */
/*
 * Run the ESB trigger state machine on the ESn or ESe PQ bits of the
 * END (selected by 'end_esmask'). Returns true when the notification
 * should proceed, false when it was coalesced into the Q bit.
 */
static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
                                       uint32_t end_idx, Xive2End *end,
                                       uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    /* Write the PQ bits back only if the trigger changed them */
    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}
435 
/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as in the function below.
 */
441 static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
442                                     uint32_t end_idx, uint32_t end_data)
443 {
444     Xive2End end;
445     uint8_t priority;
446     uint8_t format;
447     bool found;
448     Xive2Nvp nvp;
449     uint8_t nvp_blk;
450     uint32_t nvp_idx;
451 
452     /* END cache lookup */
453     if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
454         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
455                       end_idx);
456         return;
457     }
458 
459     if (!xive2_end_is_valid(&end)) {
460         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
461                       end_blk, end_idx);
462         return;
463     }
464 
465     if (xive2_end_is_enqueue(&end)) {
466         xive2_end_enqueue(&end, end_data);
467         /* Enqueuing event data modifies the EQ toggle and index */
468         xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
469     }
470 
471     /*
472      * When the END is silent, we skip the notification part.
473      */
474     if (xive2_end_is_silent_escalation(&end)) {
475         goto do_escalation;
476     }
477 
478     /*
479      * The W7 format depends on the F bit in W6. It defines the type
480      * of the notification :
481      *
482      *   F=0 : single or multiple NVP notification
483      *   F=1 : User level Event-Based Branch (EBB) notification, no
484      *         priority
485      */
486     format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
487     priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);
488 
489     /* The END is masked */
490     if (format == 0 && priority == 0xff) {
491         return;
492     }
493 
494     /*
495      * Check the END ESn (Event State Buffer for notification) for
496      * even futher coalescing in the Router
497      */
498     if (!xive2_end_is_notify(&end)) {
499         /* ESn[Q]=1 : end of notification */
500         if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
501                                        &end, END2_W1_ESn)) {
502             return;
503         }
504     }
505 
506     /*
507      * Follows IVPE notification
508      */
509     nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
510     nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);
511 
512     /* NVP cache lookup */
513     if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
514         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
515                       nvp_blk, nvp_idx);
516         return;
517     }
518 
519     if (!xive2_nvp_is_valid(&nvp)) {
520         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
521                       nvp_blk, nvp_idx);
522         return;
523     }
524 
525     found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
526                           xive_get_field32(END2_W6_IGNORE, end.w7),
527                           priority,
528                           xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7));
529 
530     /* TODO: Auto EOI. */
531 
532     if (found) {
533         return;
534     }
535 
536     /*
537      * If no matching NVP is dispatched on a HW thread :
538      * - specific VP: update the NVP structure if backlog is activated
539      * - logical server : forward request to IVPE (not supported)
540      */
541     if (xive2_end_is_backlog(&end)) {
542         uint8_t ipb;
543 
544         if (format == 1) {
545             qemu_log_mask(LOG_GUEST_ERROR,
546                           "XIVE: END %x/%x invalid config: F1 & backlog\n",
547                           end_blk, end_idx);
548             return;
549         }
550 
551         /*
552          * Record the IPB in the associated NVP structure for later
553          * use. The presenter will resend the interrupt when the vCPU
554          * is dispatched again on a HW thread.
555          */
556         ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
557             xive_priority_to_ipb(priority);
558         nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
559         xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
560 
561         /*
562          * On HW, follows a "Broadcast Backlog" to IVPEs
563          */
564     }
565 
566 do_escalation:
567     /*
568      * If activated, escalate notification using the ESe PQ bits and
569      * the EAS in w4-5
570      */
571     if (!xive2_end_is_escalate(&end)) {
572         return;
573     }
574 
575     /*
576      * Check the END ESe (Event State Buffer for escalation) for even
577      * futher coalescing in the Router
578      */
579     if (!xive2_end_is_uncond_escalation(&end)) {
580         /* ESe[Q]=1 : end of escalation notification */
581         if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
582                                        &end, END2_W1_ESe)) {
583             return;
584         }
585     }
586 
587     /*
588      * The END trigger becomes an Escalation trigger
589      */
590     xive2_router_end_notify(xrtr,
591                            xive_get_field32(END2_W4_END_BLOCK,     end.w4),
592                            xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
593                            xive_get_field32(END2_W5_ESC_END_DATA,  end.w5));
594 }
595 
/*
 * Event notification entry point for the sources: look up the EAS of
 * the LISN, run the PQ state machine when the source has not already
 * done so ('pq_checked'), and forward the event as an END trigger.
 */
void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    Xive2Eas eas;

    /* EAS cache lookup */
    if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        /* The PQ state machine coalesced the event */
        if (!notify) {
            return;
        }
    }

    if (!xive2_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn);
        return;
    }

    if (xive2_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive2_router_end_notify(xrtr,
                             xive_get_field64(EAS2_END_BLOCK, eas.w),
                             xive_get_field64(EAS2_END_INDEX, eas.w),
                             xive_get_field64(EAS2_END_DATA,  eas.w));
}
649 
/* QOM properties: the "xive-fabric" link to the presenter fabric */
static Property xive2_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
    DEFINE_PROP_END_OF_LIST(),
};
655 
656 static void xive2_router_class_init(ObjectClass *klass, void *data)
657 {
658     DeviceClass *dc = DEVICE_CLASS(klass);
659     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
660 
661     dc->desc    = "XIVE2 Router Engine";
662     device_class_set_props(dc, xive2_router_properties);
663     /* Parent is SysBusDeviceClass. No need to call its realize hook */
664     dc->realize = xive2_router_realize;
665     xnc->notify = xive2_router_notify;
666 }
667 
/*
 * Abstract QOM type: concrete router models provide the
 * Xive2RouterClass accessors. The router also implements the XIVE
 * notifier and presenter interfaces.
 */
static const TypeInfo xive2_router_info = {
    .name          = TYPE_XIVE2_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .instance_size = sizeof(Xive2Router),
    .class_size    = sizeof(Xive2RouterClass),
    .class_init    = xive2_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};
681 
682 static inline bool addr_is_even(hwaddr addr, uint32_t shift)
683 {
684     return !((addr >> shift) & 1);
685 }
686 
/*
 * END ESB MMIO loads. The load address selects the END (one even/odd
 * page pair per END) and the page offset selects the operation: EOI,
 * PQ read or PQ update on the ESn (even page) or ESe (odd page) bits.
 * Returns the operation result or -1 on error.
 */
static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);  /* two ESB pages per END */

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    /* Even page -> notification (ESn), odd page -> escalation (ESe) */
    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        /* Bits 8-9 of the offset encode the new PQ value */
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    /* Write the PQ bits back only if the operation changed them */
    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}
751 
/*
 * END ESB MMIO stores. The store address selects the END and the page
 * offset selects the operation: trigger, store EOI or EQ injection on
 * the ESn (even page) or ESe (odd page) bits.
 */
static void xive2_end_source_write(void *opaque, hwaddr addr,
                                   uint64_t value, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    bool notify = false;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);  /* two ESB pages per END */

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    /* Even page -> notification (ESn), odd page -> escalation (ESe) */
    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_esb_trigger(&pq);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        /* TODO: can we check StoreEOI availability from the router ? */
        notify = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        /* EQ injection is only valid on the notification (ESn) page */
        if (end_esmask == END2_W1_ESe) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x can not EQ inject on ESe\n",
                           end_blk, end_idx);
            return;
        }
        notify = true;
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n",
                      offset);
        return;
    }

    /* Write the PQ bits back only if the operation changed them */
    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    /* TODO: Forward the source event notification for routing */
    if (notify) {
        ;
    }
}
823 
/*
 * END ESB MMIO region operations: the interface is big-endian and only
 * accepts aligned 8-byte accesses.
 */
static const MemoryRegionOps xive2_end_source_ops = {
    .read = xive2_end_source_read,
    .write = xive2_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
837 
838 static void xive2_end_source_realize(DeviceState *dev, Error **errp)
839 {
840     Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);
841 
842     assert(xsrc->xrtr);
843 
844     if (!xsrc->nr_ends) {
845         error_setg(errp, "Number of interrupt needs to be greater than 0");
846         return;
847     }
848 
849     if (xsrc->esb_shift != XIVE_ESB_4K &&
850         xsrc->esb_shift != XIVE_ESB_64K) {
851         error_setg(errp, "Invalid ESB shift setting");
852         return;
853     }
854 
855     /*
856      * Each END is assigned an even/odd pair of MMIO pages, the even page
857      * manages the ESn field while the odd page manages the ESe field.
858      */
859     memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
860                           &xive2_end_source_ops, xsrc, "xive.end",
861                           (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
862 }
863 
/* QOM properties: END count, ESB page shift and the router link */
static Property xive2_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
                     Xive2Router *),
    DEFINE_PROP_END_OF_LIST(),
};
871 
872 static void xive2_end_source_class_init(ObjectClass *klass, void *data)
873 {
874     DeviceClass *dc = DEVICE_CLASS(klass);
875 
876     dc->desc    = "XIVE END Source";
877     device_class_set_props(dc, xive2_end_source_properties);
878     dc->realize = xive2_end_source_realize;
879 }
880 
/* QOM registration data for the XIVE2 END ESB source device */
static const TypeInfo xive2_end_source_info = {
    .name          = TYPE_XIVE2_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(Xive2EndSource),
    .class_init    = xive2_end_source_class_init,
};
887 
/* Register the XIVE2 QOM types at QEMU startup */
static void xive2_register_types(void)
{
    type_register_static(&xive2_router_info);
    type_register_static(&xive2_end_source_info);
}

type_init(xive2_register_types)
895