/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2024, IBM Corporation.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "system/cpus.h"
#include "system/dma.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/xive2_regs.h"

uint32_t xive2_router_get_config(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_config(xrtr);
}

static int xive2_router_get_block_id(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_block_id(xrtr);
}

static uint64_t xive2_nvp_reporting_addr(Xive2Nvp *nvp)
{
    uint64_t cache_addr;

    cache_addr = xive_get_field32(NVP2_W6_REPORTING_LINE, nvp->w6) << 24 |
        xive_get_field32(NVP2_W7_REPORTING_LINE, nvp->w7);
    cache_addr <<= 8; /* aligned on a cache line pair */
    return cache_addr;
}
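
/*
 * Illustrative example (field values are made up): with the reporting
 * line fields holding 0x12 (W6) and 0x345678 (W7), the assembly above
 * yields:
 *
 *   cache_addr = 0x12 << 24 | 0x345678  ->  0x12345678
 *   cache_addr <<= 8                    ->  0x1234567800
 *
 * The result is 256-byte aligned, i.e. an even/odd pair of 128-byte
 * cache lines. xive2_tm_pull_ctx_ol() below writes the pulled context
 * to the odd line of the pair, at +0x80.
 */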

static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
{
    uint32_t val = 0;
    uint8_t *ptr, i;

    if (priority > 7) {
        return 0;
    }

    /*
     * The per-priority backlog counters are 24-bit and the structure
     * is stored in big endian. The NVGC is 32 bytes long; the 24 bytes
     * starting at w2 fit the 8 priorities * 24 bits per priority.
     */
    ptr = (uint8_t *)&nvgc->w2 + priority * 3;
    for (i = 0; i < 3; i++, ptr++) {
        val = (val << 8) + *ptr;
    }
    return val;
}

static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority,
                                   uint32_t val)
{
    uint8_t *ptr, i;
    uint32_t shift;

    if (priority > 7) {
        return;
    }

    if (val > 0xFFFFFF) {
        val = 0xFFFFFF;
    }
    /*
     * The per-priority backlog counters are 24-bit and the structure
     * is stored in big endian
     */
    ptr = (uint8_t *)&nvgc->w2 + priority * 3;
    for (i = 0; i < 3; i++, ptr++) {
        shift = 8 * (2 - i);
        *ptr = (val >> shift) & 0xFF;
    }
}
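
/*
 * Illustrative example (counter value is made up): for priority 3, both
 * helpers above address bytes 9..11 of the 24-byte area starting at w2.
 * If those bytes hold 0x00, 0x12, 0x34 (big endian), the read loop
 * assembles:
 *
 *   val = ((0x00 << 8 | 0x12) << 8) | 0x34 = 0x001234
 *
 * and xive2_nvgc_set_backlog() writes the same value back byte by byte,
 * most significant byte first, saturating at the 24-bit maximum 0xFFFFFF.
 */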

void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf)
{
    if (!xive2_eas_is_valid(eas)) {
        return;
    }

    g_string_append_printf(buf, "  %08x %s end:%02x/%04x data:%08x\n",
                           lisn, xive2_eas_is_masked(eas) ? "M" : " ",
                           (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, GString *buf)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. qindex ] window
     */
    g_string_append_printf(buf, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        g_string_append_printf(buf, "%s%08x ", i == width - 1 ? "^" : "",
                               be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    g_string_append_printf(buf, "]");
}
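
/*
 * Illustrative example (queue contents are made up): with width = 6 and
 * a current queue index of 10, the loop above prints entries 5 through
 * 10, flagging entry 10 (the next slot to be written) with a '^':
 *
 *   [ 00000001 00000002 00000003 00000004 00000005 ^00000006 ]
 */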

void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
    uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
    uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive2_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESn, end->w1);

    g_string_append_printf(buf,
                           "  %08x %c%c %c%c%c%c%c%c%c%c%c%c%c %c%c "
                           "prio:%d nvp:%02x/%04x",
                           end_idx,
                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                           xive2_end_is_valid(end)    ? 'v' : '-',
                           xive2_end_is_enqueue(end)  ? 'q' : '-',
                           xive2_end_is_notify(end)   ? 'n' : '-',
                           xive2_end_is_backlog(end)  ? 'b' : '-',
                           xive2_end_is_precluded_escalation(end) ? 'p' : '-',
                           xive2_end_is_escalate(end) ? 'e' : '-',
                           xive2_end_is_escalate_end(end) ? 'N' : '-',
                           xive2_end_is_uncond_escalation(end)   ? 'u' : '-',
                           xive2_end_is_silent_escalation(end)   ? 's' : '-',
                           xive2_end_is_firmware1(end)   ? 'f' : '-',
                           xive2_end_is_firmware2(end)   ? 'F' : '-',
                           xive2_end_is_ignore(end) ? 'i' : '-',
                           xive2_end_is_crowd(end)  ? 'c' : '-',
                           priority, nvp_blk, nvp_idx);

    if (qaddr_base) {
        g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                               qaddr_base, qindex, qentries, qgen);
        xive2_end_queue_pic_print_info(end, 6, buf);
    }
    g_string_append_c(buf, '\n');
}

void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
                                  GString *buf)
{
    Xive2Eas *eas = (Xive2Eas *) &end->w4;
    uint8_t pq;

    if (!xive2_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESe, end->w1);

    g_string_append_printf(buf, "  %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                           end_idx,
                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                           xive2_eas_is_valid(eas) ? 'v' : ' ',
                           xive2_eas_is_masked(eas) ? 'M' : ' ',
                           (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
    uint64_t cache_line = xive2_nvp_reporting_addr(nvp);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    g_string_append_printf(buf, "  %08x end:%02x/%04x IPB:%02x PGoFirst:%02x",
                           nvp_idx, eq_blk, eq_idx,
                           xive_get_field32(NVP2_W2_IPB, nvp->w2),
                           xive_get_field32(NVP2_W0_PGOFIRST, nvp->w0));
    if (cache_line) {
        g_string_append_printf(buf, "  reporting CL:%016"PRIx64, cache_line);
    }

    /*
     * When the NVP is HW controlled, more fields are updated
     */
    if (xive2_nvp_is_hw(nvp)) {
        g_string_append_printf(buf, " CPPR:%02x",
                               xive_get_field32(NVP2_W2_CPPR, nvp->w2));
        if (xive2_nvp_is_co(nvp)) {
            g_string_append_printf(buf, " CO:%04x",
                                   xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
        }
    }
    g_string_append_c(buf, '\n');
}

void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf)
{
    uint8_t i;

    if (!xive2_nvgc_is_valid(nvgc)) {
        return;
    }

    g_string_append_printf(buf, "  %08x PGoNext:%02x bklog: ", nvgc_idx,
                           xive_get_field32(NVGC2_W0_PGONEXT, nvgc->w0));
    for (i = 0; i <= XIVE_PRIORITY_MAX; i++) {
        g_string_append_printf(buf, "[%d]=0x%x ",
                               i, xive2_nvgc_get_backlog(nvgc, i));
    }
    g_string_append_printf(buf, "\n");
}

static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
                         MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);

        /* TODO(PowerNV): reset GF bit on a cache watch operation */
        end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}
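
/*
 * Illustrative example (queue parameters are made up): with QSIZE = 0
 * the queue holds 1 << 10 = 1024 4-byte entries. Enqueueing data 0x123
 * with generation bit 1 writes the big-endian word
 *
 *   (1 << 31) | 0x123 = 0x80000123
 *
 * at qaddr_base + qindex * 4. When qindex wraps to 0, the generation
 * bit toggles so that a consumer can tell freshly written entries from
 * stale ones of the previous pass.
 */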

/*
 * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
 *
 * TIMA Gen2 VP "save & restore" (S&R) indicated by H bit next to V bit
 *
 *   - if a context is enabled with the H bit set, the VP context
 *     information is retrieved from the NVP structure ("check out")
 *     and stored back on a context pull ("check in"), the SW receives
 *     the same context pull information as on P9
 *
 *   - the H bit cannot be changed while the V bit is set, i.e. a
 *     context cannot be set up in the TIMA and then be "pushed" into
 *     the NVP by changing the H bit while the context is enabled
 */

static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                uint8_t nvp_blk, uint32_t nvp_idx,
                                uint8_t ring)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    Xive2Nvp nvp;
    uint8_t *regs = &tctx->regs[ring];

    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_hw(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_co(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checked out\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) &&
        xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: NVP %x/%x invalid checkout Thread %x\n",
                      nvp_blk, nvp_idx, pir);
        return;
    }

    nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
    nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
    if (nvp.w0 & NVP2_W0_L) {
        /*
         * Typically not used. If LSMFB is restored with 0, it will
         * force a backlog rescan
         */
        nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
    }
    if (nvp.w0 & NVP2_W0_G) {
        nvp.w2 = xive_set_field32(NVP2_W2_LGS, nvp.w2, regs[TM_LGS]);
    }
    if (nvp.w0 & NVP2_W0_T) {
        nvp.w2 = xive_set_field32(NVP2_W2_T, nvp.w2, regs[TM_T]);
    }
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

    nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
    /* NVP2_W1_CO_THRID_VALID only set once */
    nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
}

static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk,
                             uint32_t *nvp_idx, bool *valid, bool *hw)
{
    *nvp_blk = xive2_nvp_blk(cam);
    *nvp_idx = xive2_nvp_idx(cam);
    *valid = !!(cam & TM2_W2_VALID);
    *hw = !!(cam & TM2_W2_HW);
}

/*
 * Encode the HW CAM line with a 7-bit or 8-bit thread id. The thread id
 * width and block id width are configurable at the IC level.
 *
 *    chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
 *    chipid << 24 | 0000 0000 0000 0001 threadid   (8Bit)
 */
static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t blk = xive2_router_get_block_id(xrtr);
    uint8_t tid_shift =
        xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
    uint8_t tid_mask = (1 << tid_shift) - 1;

    return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
}
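
/*
 * Illustrative example (PIR value is made up): in 7-bit mode, a thread
 * with PIR 5 yields 1 << 7 | (5 & 0x7F) = 0x85 as the thread id part of
 * the CAM line, matching the "1 threadid (7Bit)" layout in the comment
 * above. In 8-bit mode the marker bit moves up one position (1 << 8)
 * and the low 8 bits of the PIR are kept instead.
 */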

static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                  hwaddr offset, unsigned size, uint8_t ring)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t target_ringw2 = xive_tctx_word2(&tctx->regs[ring]);
    uint32_t cam = be32_to_cpu(target_ringw2);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    uint8_t cur_ring;
    bool valid;
    bool do_save;

    xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save);

    if (!valid) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
                      nvp_blk, nvp_idx);
    }

    /* Invalidate CAM line of requested ring and all lower rings */
    for (cur_ring = TM_QW0_USER; cur_ring <= ring;
         cur_ring += XIVE_TM_RING_SIZE) {
        uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]);
        uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0);
        memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4);
    }

    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
        xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring);
    }

    /*
     * Lower external interrupt line of requested ring and below except for
     * USER, which doesn't exist.
     */
    for (cur_ring = TM_QW1_OS; cur_ring <= ring;
         cur_ring += XIVE_TM_RING_SIZE) {
        xive_tctx_reset_signal(tctx, cur_ring);
    }
    return target_ringw2;
}
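
/*
 * For reference: the TIMA rings are 0x10 bytes apart (TM_QW0_USER at
 * 0x00, TM_QW1_OS at 0x10, TM_QW2_HV_POOL at 0x20 and TM_QW3_HV_PHYS at
 * 0x30, per hw/ppc/xive_regs.h), which is why the loops above can walk
 * "the requested ring and all lower rings" simply by stepping cur_ring
 * by XIVE_TM_RING_SIZE.
 */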

uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset, unsigned size)
{
    return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS);
}

#define REPORT_LINE_GEN1_SIZE       16

static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data,
                                      uint8_t size)
{
    uint8_t *regs = tctx->regs;

    g_assert(size == REPORT_LINE_GEN1_SIZE);
    memset(data, 0, size);
    /*
     * See xive architecture for description of what is saved. It is
     * hand-picked information to fit in 16 bytes.
     */
    data[0x0] = regs[TM_QW3_HV_PHYS + TM_NSR];
    data[0x1] = regs[TM_QW3_HV_PHYS + TM_CPPR];
    data[0x2] = regs[TM_QW3_HV_PHYS + TM_IPB];
    data[0x3] = regs[TM_QW2_HV_POOL + TM_IPB];
    data[0x4] = regs[TM_QW1_OS + TM_ACK_CNT];
    data[0x5] = regs[TM_QW3_HV_PHYS + TM_LGS];
    data[0x6] = 0xFF;
    data[0x7] = regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x80;
    data[0x7] |= (regs[TM_QW2_HV_POOL + TM_WORD2] & 0x80) >> 1;
    data[0x7] |= (regs[TM_QW1_OS + TM_WORD2] & 0x80) >> 2;
    data[0x7] |= (regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x3);
    data[0x8] = regs[TM_QW1_OS + TM_NSR];
    data[0x9] = regs[TM_QW1_OS + TM_CPPR];
    data[0xA] = regs[TM_QW1_OS + TM_IPB];
    data[0xB] = regs[TM_QW1_OS + TM_LGS];
    if (regs[TM_QW0_USER + TM_WORD2] & 0x80) {
        /*
         * Logical server extension, except VU bit replaced by EB bit
         * from NSR
         */
        data[0xC] = regs[TM_QW0_USER + TM_WORD2];
        data[0xC] &= ~0x80;
        data[0xC] |= regs[TM_QW0_USER + TM_NSR] & 0x80;
        data[0xD] = regs[TM_QW0_USER + TM_WORD2 + 1];
        data[0xE] = regs[TM_QW0_USER + TM_WORD2 + 2];
        data[0xF] = regs[TM_QW0_USER + TM_WORD2 + 3];
    }
}

static void xive2_tm_pull_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                                 hwaddr offset, uint64_t value,
                                 unsigned size, uint8_t ring)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t hw_cam, nvp_idx, xive2_cfg, reserved;
    uint8_t nvp_blk;
    Xive2Nvp nvp;
    uint64_t phys_addr;
    MemTxResult result;

    hw_cam = xive2_tctx_hw_cam_line(xptr, tctx);
    nvp_blk = xive2_nvp_blk(hw_cam);
    nvp_idx = xive2_nvp_idx(hw_cam);

    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    xive2_cfg = xive2_router_get_config(xrtr);

    phys_addr = xive2_nvp_reporting_addr(&nvp) + 0x80; /* odd line */
    if (xive2_cfg & XIVE2_GEN1_TIMA_OS) {
        uint8_t pull_ctxt[REPORT_LINE_GEN1_SIZE];

        xive2_tm_report_line_gen1(tctx, pull_ctxt, REPORT_LINE_GEN1_SIZE);
        result = dma_memory_write(&address_space_memory, phys_addr,
                                  pull_ctxt, REPORT_LINE_GEN1_SIZE,
                                  MEMTXATTRS_UNSPECIFIED);
        assert(result == MEMTX_OK);
    } else {
        result = dma_memory_write(&address_space_memory, phys_addr,
                                  &tctx->regs, sizeof(tctx->regs),
                                  MEMTXATTRS_UNSPECIFIED);
        assert(result == MEMTX_OK);
        reserved = 0xFFFFFFFF;
        result = dma_memory_write(&address_space_memory, phys_addr + 12,
                                  &reserved, sizeof(reserved),
                                  MEMTXATTRS_UNSPECIFIED);
        assert(result == MEMTX_OK);
    }

    /* the rest is similar to pull context to registers */
    xive2_tm_pull_ctx(xptr, tctx, offset, size, ring);
}

void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                             hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW1_OS);
}

void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                               hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS);
}

static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                         uint8_t nvp_blk, uint32_t nvp_idx,
                                         Xive2Nvp *nvp)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t cppr;

    if (!xive2_nvp_is_hw(nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return 0;
    }

    cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2);
    nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);

    tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
    tctx->regs[TM_QW1_OS + TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2);
    tctx->regs[TM_QW1_OS + TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2);
    tctx->regs[TM_QW1_OS + TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2);

    nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir);

    /*
     * Checkout privilege: 0:OS, 1:Pool, 2:Hard
     *
     * TODO: we only support OS push/pull
     */
    nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0);

    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1);

    /* return restored CPPR to generate a CPU exception if needed */
    return cppr;
}

static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx,
                                   bool do_restore)
{
    uint8_t ipb;
    uint8_t backlog_level;
    uint8_t backlog_prio;
    uint8_t *regs = &tctx->regs[TM_QW1_OS];
    Xive2Nvp nvp;

    /*
     * Grab the thread interrupt context registers saved in the
     * associated NVP
     */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    /* Automatically restore thread context registers */
    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE &&
        do_restore) {
        xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
    }

    ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
    if (ipb) {
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
    }
    regs[TM_IPB] |= ipb;
    backlog_prio = xive_ipb_to_pipr(ipb);
    backlog_level = 0;

    /*
     * Compute the PIPR based on the restored state.
     * It will raise the External interrupt signal if needed.
     */
    xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level);
}

/*
 * Updating the OS CAM line can trigger a resend of interrupts
 */
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam;
    uint32_t qw1w2;
    uint64_t qw1dw1;
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;
    bool do_restore;

    /* First update the thread context */
    switch (size) {
    case 4:
        cam = value;
        qw1w2 = cpu_to_be32(cam);
        memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
        break;
    case 8:
        cam = value >> 32;
        qw1dw1 = cpu_to_be64(value);
        memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8);
        break;
    default:
        g_assert_not_reached();
    }

    xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);

    /* Check the interrupt pending bits */
    if (vo) {
        xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx,
                               do_restore);
    }
}

static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target)
{
    uint8_t *regs = &tctx->regs[ring];

    regs[TM_T] = target;
}

void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
                            hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tctx_set_target(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */

int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                         Xive2Eas *eas)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

static
int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
}

static
int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
}

int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                         Xive2End *end)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                           Xive2End *end, uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                         Xive2Nvp *nvp)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp);
}

int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                           Xive2Nvp *nvp, uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
}

int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd,
                          uint8_t nvgc_blk, uint32_t nvgc_idx,
                          Xive2Nvgc *nvgc)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
}

int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
                            uint8_t nvgc_blk, uint32_t nvgc_idx,
                            Xive2Nvgc *nvgc)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
}

static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2,
                                uint32_t vp_mask)
{
    return (cam1 & vp_mask) == (cam2 & vp_mask);
}

/*
 * The thread context register words are in big-endian format.
 */
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                               uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam =   xive2_nvp_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    uint32_t vp_mask = 0xFFFFFFFF;

    if (format == 0) {
        /*
         * i=0: Specific NVT notification
         * i=1: VP-group notification (bits ignored at the end of the
         *      NVT identifier)
         */
        if (cam_ignore) {
            vp_mask = ~(xive_get_vpgroup_size(nvt_idx) - 1);
        }

        /* For VP-group notifications, threads with LGS=0 are excluded */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
            !(cam_ignore && tctx->regs[TM_QW3_HV_PHYS + TM_LGS] == 0) &&
            xive2_vp_match_mask(cam,
                                xive2_tctx_hw_cam_line(xptr, tctx),
                                vp_mask)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
            !(cam_ignore && tctx->regs[TM_QW2_HV_POOL + TM_LGS] == 0) &&
            xive2_vp_match_mask(cam,
                                xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2),
                                vp_mask)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            !(cam_ignore && tctx->regs[TM_QW1_OS + TM_LGS] == 0) &&
            xive2_vp_match_mask(cam,
                                xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2),
                                vp_mask)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* FIXME: what if cam_ignore and LGS = 0 ? */
        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority)
{
    uint8_t *regs = &tctx->regs[ring];

    /*
     * The xive2_presenter_tctx_match() above tells if there's a match
     * but for VP-group notification, we still need to look at the
     * priority to know if the thread can take the interrupt now or if
     * it is precluded.
     */
    if (priority < regs[TM_CPPR]) {
        return false;
    }
    return true;
}

void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority)
{
    uint8_t *regs = &tctx->regs[ring];

    /*
     * Called by the router during a VP-group notification when the
     * thread matches but can't take the interrupt because it's
     * already running at a more favored priority. It then stores the
     * new interrupt priority in the LSMFB field.
     */
    regs[TM_LSMFB] = priority;
}

static void xive2_router_realize(DeviceState *dev, Error **errp)
{
    Xive2Router *xrtr = XIVE2_ROUTER(dev);

    assert(xrtr->xfb);
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provides further coalescing in the
 * Router.
 */
static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
                                       uint32_t end_idx, Xive2End *end,
                                       uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}
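
/*
 * As a reminder of the ESB semantics implemented by xive_esb_trigger()
 * (see hw/intc/xive.c), the PQ bit transitions on a trigger are:
 *
 *   PQ=00 (reset)   -> PQ=10, forward the notification
 *   PQ=10 (pending) -> PQ=11, coalesce: an event is already in flight
 *   PQ=11 (queued)  -> PQ=11, coalesce
 *   PQ=01 (off)     -> PQ=01, discard
 *
 * so a second trigger arriving before the first is acknowledged only
 * sets the Q bit and does not generate a new notification.
 */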

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
                                    uint32_t end_idx, uint32_t end_data)
{
    Xive2End end;
    uint8_t priority;
    uint8_t format;
    bool found, precluded;
    uint8_t nvp_blk;
    uint32_t nvp_idx;

    /* END cache lookup */
    if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive2_end_is_enqueue(&end)) {
        xive2_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive2_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVP notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive2_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
    nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);

    found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
                          xive2_end_is_ignore(&end),
                          priority,
                          xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7),
                          &precluded);

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVP is dispatched on a HW thread :
     * - specific VP: update the NVP structure if backlog is activated
     * - VP-group: update the backlog counter for that priority in the NVG
     */
    if (xive2_end_is_backlog(&end)) {

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }

        if (!xive2_end_is_ignore(&end)) {
            uint8_t ipb;
            Xive2Nvp nvp;

            /* NVP cache lookup */
            if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
                              nvp_blk, nvp_idx);
                return;
            }

            if (!xive2_nvp_is_valid(&nvp)) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
                              nvp_blk, nvp_idx);
                return;
            }

            /*
             * Record the IPB in the associated NVP structure for later
             * use. The presenter will resend the interrupt when the vCPU
             * is dispatched again on a HW thread.
             */
            ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
                xive_priority_to_ipb(priority);
            nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
            xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
        } else {
            Xive2Nvgc nvg;
            uint32_t backlog;

            /* For groups, the per-priority backlog counters are in the NVG */
            if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg)) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVG %x/%x\n",
                              nvp_blk, nvp_idx);
                return;
            }

            if (!xive2_nvgc_is_valid(&nvg)) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n",
                              nvp_blk, nvp_idx);
                return;
            }

            /*
             * Increment the backlog counter for that priority.
             * We only call broadcast the first time the counter is
             * incremented. broadcast will set the LSMFB field of the TIMA of
             * relevant threads so that they know an interrupt is pending.
             */
            backlog = xive2_nvgc_get_backlog(&nvg, priority) + 1;
            xive2_nvgc_set_backlog(&nvg, priority, backlog);
            xive2_router_write_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg);

            if (backlog == 1) {
                XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb);
                xfc->broadcast(xrtr->xfb, nvp_blk, nvp_idx, priority);

                if (!xive2_end_is_precluded_escalation(&end)) {
                    /*
                     * The interrupt will be picked up when the
                     * matching thread lowers its priority level
                     */
                    return;
                }
            }
        }
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive2_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive2_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of escalation notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field32(END2_W4_END_BLOCK,     end.w4),
                            xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
                            xive_get_field32(END2_W5_ESC_END_DATA,  end.w5));
}

void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    Xive2Eas eas;

    /* EAS cache lookup */
    if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        if (!notify) {
            return;
        }
    }

    if (!xive2_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn);
        return;
    }

    if (xive2_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field64(EAS2_END_BLOCK, eas.w),
                            xive_get_field64(EAS2_END_INDEX, eas.w),
                            xive_get_field64(EAS2_END_DATA,  eas.w));
}

static const Property xive2_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
};

static void xive2_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc    = "XIVE2 Router Engine";
    device_class_set_props(dc, xive2_router_properties);
    /* Parent is SysBusDeviceClass. No need to call its realize hook */
    dc->realize = xive2_router_realize;
    xnc->notify = xive2_router_notify;
}

static const TypeInfo xive2_router_info = {
    .name          = TYPE_XIVE2_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .instance_size = sizeof(Xive2Router),
    .class_size    = sizeof(Xive2RouterClass),
    .class_init    = xive2_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};

static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}
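
/*
 * Illustrative decode of an END ESB MMIO access (addresses are made
 * up): with 64K ESB pages (esb_shift = 16), an access at 0x30123
 * selects END index 0x30123 >> 17 = 1, lands on the odd page of the
 * pair (bit 16 set), so it targets the ESe field, with the command
 * decoded from page offset 0x123.
 */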

static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

static void xive2_end_source_write(void *opaque, hwaddr addr,
                                   uint64_t value, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    bool notify = false;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_esb_trigger(&pq);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        /* TODO: can we check StoreEOI availability from the router ? */
        notify = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        if (end_esmask == END2_W1_ESe) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x cannot EQ inject on ESe\n",
                          end_blk, end_idx);
            return;
        }
        notify = true;
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n",
                      offset);
        return;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    /* TODO: Forward the source event notification for routing */
    if (notify) {
        ;
    }
}

static const MemoryRegionOps xive2_end_source_ops = {
    .read = xive2_end_source_read,
    .write = xive2_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void xive2_end_source_realize(DeviceState *dev, Error **errp)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive2_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}
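
/*
 * Illustrative sizing (property values are made up): with nr-ends = 8
 * and shift = XIVE_ESB_64K (16), each END consumes a pair of 64KiB
 * pages, so the "xive.end" region above spans 8 * 128KiB = 1MiB.
 */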

static const Property xive2_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
                     Xive2Router *),
};

static void xive2_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc    = "XIVE END Source";
    device_class_set_props(dc, xive2_end_source_properties);
    dc->realize = xive2_end_source_realize;
    dc->user_creatable = false;
}

static const TypeInfo xive2_end_source_info = {
    .name          = TYPE_XIVE2_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(Xive2EndSource),
    .class_init    = xive2_end_source_class_init,
};

static void xive2_register_types(void)
{
    type_register_static(&xive2_router_info);
    type_register_static(&xive2_end_source_info);
}

type_init(xive2_register_types)