/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2024, IBM Corporation.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "system/cpus.h"
#include "system/dma.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/xive2_regs.h"

uint32_t xive2_router_get_config(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_config(xrtr);
}

static int xive2_router_get_block_id(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_block_id(xrtr);
}

static uint64_t xive2_nvp_reporting_addr(Xive2Nvp *nvp)
{
    uint64_t cache_addr;

    cache_addr = xive_get_field32(NVP2_W6_REPORTING_LINE, nvp->w6) << 24 |
        xive_get_field32(NVP2_W7_REPORTING_LINE, nvp->w7);
    cache_addr <<= 8; /* aligned on a cache line pair */
    return cache_addr;
}
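
/*
 * Worked example (illustrative only): with NVP2_W6_REPORTING_LINE = 0x01
 * and NVP2_W7_REPORTING_LINE = 0x2345, the address computed above is
 * ((0x01 << 24) | 0x2345) << 8 = 0x0100234500. The result is always
 * 256-byte aligned, i.e. it covers an even/odd pair of 128-byte cache
 * lines; the pull-context code below stores to "addr + 0x80" to reach
 * the odd line of the pair.
 */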

static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
{
    uint32_t val = 0;
    uint8_t *ptr, i;

    if (priority > 7) {
        return 0;
    }

    /*
     * The per-priority backlog counters are 24-bit and the structure
     * is stored in big endian
     */
    ptr = (uint8_t *)&nvgc->w2 + priority * 3;
    for (i = 0; i < 3; i++, ptr++) {
        val = (val << 8) + *ptr;
    }
    return val;
}
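
/*
 * Illustrative layout (a sketch, not extra behavior): the eight 3-byte
 * counters are packed back to back from w2 onwards, so priority p lives
 * in bytes [3 * p, 3 * p + 2] of that big-endian byte stream. Priority 0
 * occupies the first three bytes of w2, priority 1 straddles the last
 * byte of w2 and the first two of w3, and so on up to priority 7 ending
 * with w7. A raw stream starting 00 01 02 03 ... would thus yield
 * backlog[0] = 0x000102.
 */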

void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf)
{
    if (!xive2_eas_is_valid(eas)) {
        return;
    }

    g_string_append_printf(buf, "  %08x %s end:%02x/%04x data:%08x\n",
                           lisn, xive2_eas_is_masked(eas) ? "M" : " ",
                           (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, GString *buf)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. qindex ] window
     */
    g_string_append_printf(buf, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        g_string_append_printf(buf, "%s%08x ", i == width - 1 ? "^" : "",
                               be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    g_string_append_printf(buf, "]");
}
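
/*
 * Example (illustrative): with width = 6 and END2_W1_PAGE_OFF = 7, the
 * loop above dumps queue entries 2 through 7 and prefixes the entry at
 * the current PAGE_OFF (index 7) with '^'.
 */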

void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
    uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
    uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive2_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESn, end->w1);

    g_string_append_printf(buf,
                           "  %08x %c%c %c%c%c%c%c%c%c%c%c%c%c %c%c "
                           "prio:%d nvp:%02x/%04x",
                           end_idx,
                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                           xive2_end_is_valid(end)    ? 'v' : '-',
                           xive2_end_is_enqueue(end)  ? 'q' : '-',
                           xive2_end_is_notify(end)   ? 'n' : '-',
                           xive2_end_is_backlog(end)  ? 'b' : '-',
                           xive2_end_is_precluded_escalation(end) ? 'p' : '-',
                           xive2_end_is_escalate(end) ? 'e' : '-',
                           xive2_end_is_escalate_end(end) ? 'N' : '-',
                           xive2_end_is_uncond_escalation(end)   ? 'u' : '-',
                           xive2_end_is_silent_escalation(end)   ? 's' : '-',
                           xive2_end_is_firmware1(end)   ? 'f' : '-',
                           xive2_end_is_firmware2(end)   ? 'F' : '-',
                           xive2_end_is_ignore(end) ? 'i' : '-',
                           xive2_end_is_crowd(end)  ? 'c' : '-',
                           priority, nvp_blk, nvp_idx);

    if (qaddr_base) {
        g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                               qaddr_base, qindex, qentries, qgen);
        xive2_end_queue_pic_print_info(end, 6, buf);
    }
    g_string_append_c(buf, '\n');
}

void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
                                  GString *buf)
{
    Xive2Eas *eas = (Xive2Eas *) &end->w4;
    uint8_t pq;

    if (!xive2_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESe, end->w1);

    g_string_append_printf(buf, "  %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                           end_idx,
                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                           xive2_eas_is_valid(eas) ? 'v' : ' ',
                           xive2_eas_is_masked(eas) ? 'M' : ' ',
                           (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
    uint64_t cache_line = xive2_nvp_reporting_addr(nvp);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    g_string_append_printf(buf, "  %08x end:%02x/%04x IPB:%02x PGoFirst:%02x",
                           nvp_idx, eq_blk, eq_idx,
                           xive_get_field32(NVP2_W2_IPB, nvp->w2),
                           xive_get_field32(NVP2_W0_PGOFIRST, nvp->w0));
    if (cache_line) {
        g_string_append_printf(buf, "  reporting CL:%016"PRIx64, cache_line);
    }

    /*
     * When the NVP is HW controlled, more fields are updated
     */
    if (xive2_nvp_is_hw(nvp)) {
        g_string_append_printf(buf, " CPPR:%02x",
                               xive_get_field32(NVP2_W2_CPPR, nvp->w2));
        if (xive2_nvp_is_co(nvp)) {
            g_string_append_printf(buf, " CO:%04x",
                                   xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
        }
    }
    g_string_append_c(buf, '\n');
}

void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf)
{
    uint8_t i;

    if (!xive2_nvgc_is_valid(nvgc)) {
        return;
    }

    g_string_append_printf(buf, "  %08x PGoNext:%02x bklog: ", nvgc_idx,
                           xive_get_field32(NVGC2_W0_PGONEXT, nvgc->w0));
    for (i = 0; i <= XIVE_PRIORITY_MAX; i++) {
        g_string_append_printf(buf, "[%d]=0x%x ",
                               i, xive2_nvgc_get_backlog(nvgc, i));
    }
    g_string_append_printf(buf, "\n");
}

static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
                         MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);

        /* TODO(PowerNV): reset GF bit on a cache watch operation */
        end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}
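
/*
 * Worked example (illustrative only): QSIZE encodes the queue as
 * 2^(qsize + 10) entries, so qsize = 0 is a 4KB page of 1024 4-byte
 * entries. Each entry carries the generation bit in bit 31 and the END
 * data in bits 30:0, stored big endian: with qgen = 1 and data = 0x1234
 * the word written to the EQ is 0x80001234. When PAGE_OFF wraps to 0
 * the generation toggles, which lets the consumer distinguish fresh
 * entries from stale ones of the previous lap.
 */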

/*
 * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
 *
 * TIMA Gen2 VP “save & restore” (S&R) indicated by H bit next to V bit
 *
 *   - if a context is enabled with the H bit set, the VP context
 *     information is retrieved from the NVP structure (“check out”)
 *     and stored back on a context pull (“check in”), the SW receives
 *     the same context pull information as on P9
 *
 *   - the H bit cannot be changed while the V bit is set, i.e. a
 *     context cannot be set up in the TIMA and then be “pushed” into
 *     the NVP by changing the H bit while the context is enabled
 */
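
/*
 * Illustrative sequence (a sketch of the flow implemented below, not
 * extra behavior): SW pushes an OS context by writing the CAM word with
 * V=1 and H=1; xive2_tm_push_os_ctx() then checks the context out of
 * the NVP, restoring CPPR/LSMFB/LGS/T and setting NVP2_W1_CO. A later
 * context pull through xive2_tm_pull_os_ctx() checks it back in:
 * xive2_tctx_save_ctx() writes the registers to the NVP and clears
 * NVP2_W1_CO.
 */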

static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                uint8_t nvp_blk, uint32_t nvp_idx,
                                uint8_t ring)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    Xive2Nvp nvp;
    uint8_t *regs = &tctx->regs[ring];

    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_hw(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_co(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checkout\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) &&
        xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: NVP %x/%x invalid checkout Thread %x\n",
                      nvp_blk, nvp_idx, pir);
        return;
    }

    nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
    nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
    if (nvp.w0 & NVP2_W0_L) {
        /*
         * Typically not used. If LSMFB is restored with 0, it will
         * force a backlog rescan
         */
        nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
    }
    if (nvp.w0 & NVP2_W0_G) {
        nvp.w2 = xive_set_field32(NVP2_W2_LGS, nvp.w2, regs[TM_LGS]);
    }
    if (nvp.w0 & NVP2_W0_T) {
        nvp.w2 = xive_set_field32(NVP2_W2_T, nvp.w2, regs[TM_T]);
    }
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

    nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
    /* NVP2_W1_CO_THRID_VALID only set once */
    nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
}

static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk,
                             uint32_t *nvp_idx, bool *valid, bool *hw)
{
    *nvp_blk = xive2_nvp_blk(cam);
    *nvp_idx = xive2_nvp_idx(cam);
    *valid = !!(cam & TM2_W2_VALID);
    *hw = !!(cam & TM2_W2_HW);
}

/*
 * Encode the HW CAM line with 7bit or 8bit thread id. The thread id
 * width and block id width are configurable at the IC level.
 *
 *    chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
 *    chipid << 24 | 0000 0000 0000 0001 threadid   (8Bit)
 */
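/*
 * Worked example following the layout above (illustrative only): for
 * chip 2 and thread 0x31, the 7-bit mode yields
 * 0x02000000 | 0x80 | 0x31 = 0x020000b1 and the 8-bit mode yields
 * 0x02000000 | 0x100 | 0x31 = 0x02000131.
 */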
static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t blk = xive2_router_get_block_id(xrtr);
    uint8_t tid_shift =
        xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
    uint8_t tid_mask = (1 << tid_shift) - 1;

    return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
}

static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                  hwaddr offset, unsigned size, uint8_t ring)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t target_ringw2 = xive_tctx_word2(&tctx->regs[ring]);
    uint32_t cam = be32_to_cpu(target_ringw2);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    uint8_t cur_ring;
    bool valid;
    bool do_save;

    xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save);

    if (!valid) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
                      nvp_blk, nvp_idx);
    }

    /* Invalidate CAM line of requested ring and all lower rings */
    for (cur_ring = TM_QW0_USER; cur_ring <= ring;
         cur_ring += XIVE_TM_RING_SIZE) {
        uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]);
        uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0);
        memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4);
    }

    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
        xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring);
    }

    /*
     * Lower external interrupt line of requested ring and below except for
     * USER, which doesn't exist.
     */
    for (cur_ring = TM_QW1_OS; cur_ring <= ring;
         cur_ring += XIVE_TM_RING_SIZE) {
        xive_tctx_reset_signal(tctx, cur_ring);
    }
    return target_ringw2;
}

uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset, unsigned size)
{
    return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS);
}

#define REPORT_LINE_GEN1_SIZE       16

static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data,
                                      uint8_t size)
{
    uint8_t *regs = tctx->regs;

    g_assert(size == REPORT_LINE_GEN1_SIZE);
    memset(data, 0, size);
    /*
     * See xive architecture for description of what is saved. It is
     * hand-picked information to fit in 16 bytes.
     */
    data[0x0] = regs[TM_QW3_HV_PHYS + TM_NSR];
    data[0x1] = regs[TM_QW3_HV_PHYS + TM_CPPR];
    data[0x2] = regs[TM_QW3_HV_PHYS + TM_IPB];
    data[0x3] = regs[TM_QW2_HV_POOL + TM_IPB];
    data[0x4] = regs[TM_QW1_OS + TM_ACK_CNT];
    data[0x5] = regs[TM_QW3_HV_PHYS + TM_LGS];
    data[0x6] = 0xFF;
    data[0x7] = regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x80;
    data[0x7] |= (regs[TM_QW2_HV_POOL + TM_WORD2] & 0x80) >> 1;
    data[0x7] |= (regs[TM_QW1_OS + TM_WORD2] & 0x80) >> 2;
    data[0x7] |= (regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x3);
    data[0x8] = regs[TM_QW1_OS + TM_NSR];
    data[0x9] = regs[TM_QW1_OS + TM_CPPR];
    data[0xA] = regs[TM_QW1_OS + TM_IPB];
    data[0xB] = regs[TM_QW1_OS + TM_LGS];
    if (regs[TM_QW0_USER + TM_WORD2] & 0x80) {
        /*
         * Logical server extension, except VU bit replaced by EB bit
         * from NSR
         */
        data[0xC] = regs[TM_QW0_USER + TM_WORD2];
        data[0xC] &= ~0x80;
        data[0xC] |= regs[TM_QW0_USER + TM_NSR] & 0x80;
        data[0xD] = regs[TM_QW0_USER + TM_WORD2 + 1];
        data[0xE] = regs[TM_QW0_USER + TM_WORD2 + 2];
        data[0xF] = regs[TM_QW0_USER + TM_WORD2 + 3];
    }
}
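
/*
 * Byte 0x7 packing above, spelled out (illustrative): bit 7 is the
 * PHYS ring valid bit (VT), bit 6 the POOL ring valid bit (VP) shifted
 * down by one, bit 5 the OS ring valid bit (VO) shifted down by two,
 * and bits 1:0 are the two low-order bits of the PHYS ring WORD2 byte.
 */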

static void xive2_tm_pull_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                                 hwaddr offset, uint64_t value,
                                 unsigned size, uint8_t ring)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t hw_cam, nvp_idx, xive2_cfg, reserved;
    uint8_t nvp_blk;
    Xive2Nvp nvp;
    uint64_t phys_addr;
    MemTxResult result;

    hw_cam = xive2_tctx_hw_cam_line(xptr, tctx);
    nvp_blk = xive2_nvp_blk(hw_cam);
    nvp_idx = xive2_nvp_idx(hw_cam);

    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    xive2_cfg = xive2_router_get_config(xrtr);

    phys_addr = xive2_nvp_reporting_addr(&nvp) + 0x80; /* odd line */
    if (xive2_cfg & XIVE2_GEN1_TIMA_OS) {
        uint8_t pull_ctxt[REPORT_LINE_GEN1_SIZE];

        xive2_tm_report_line_gen1(tctx, pull_ctxt, REPORT_LINE_GEN1_SIZE);
        result = dma_memory_write(&address_space_memory, phys_addr,
                                  pull_ctxt, REPORT_LINE_GEN1_SIZE,
                                  MEMTXATTRS_UNSPECIFIED);
        assert(result == MEMTX_OK);
    } else {
        result = dma_memory_write(&address_space_memory, phys_addr,
                                  &tctx->regs, sizeof(tctx->regs),
                                  MEMTXATTRS_UNSPECIFIED);
        assert(result == MEMTX_OK);
        reserved = 0xFFFFFFFF;
        result = dma_memory_write(&address_space_memory, phys_addr + 12,
                                  &reserved, sizeof(reserved),
                                  MEMTXATTRS_UNSPECIFIED);
        assert(result == MEMTX_OK);
    }

    /* the rest is similar to pull context to registers */
    xive2_tm_pull_ctx(xptr, tctx, offset, size, ring);
}

void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                             hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW1_OS);
}

void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                               hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS);
}
521 
522 static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
523                                         uint8_t nvp_blk, uint32_t nvp_idx,
524                                         Xive2Nvp *nvp)
525 {
526     CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
527     uint32_t pir = env->spr_cb[SPR_PIR].default_value;
528     uint8_t cppr;
529 
530     if (!xive2_nvp_is_hw(nvp)) {
531         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
532                       nvp_blk, nvp_idx);
533         return 0;
534     }
535 
536     cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2);
537     nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0);
538     xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);
539 
540     tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
541     tctx->regs[TM_QW1_OS + TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2);
542     tctx->regs[TM_QW1_OS + TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2);
543     tctx->regs[TM_QW1_OS + TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2);
544 
545     nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
546     nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
547     nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir);
548 
549     /*
550      * Checkout privilege: 0:OS, 1:Pool, 2:Hard
551      *
552      * TODO: we only support OS push/pull
553      */
554     nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0);
555 
556     xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1);
557 
558     /* return restored CPPR to generate a CPU exception if needed */
559     return cppr;
560 }

static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx,
                                   bool do_restore)
{
    Xive2Nvp nvp;
    uint8_t ipb;

    /*
     * Grab the associated thread interrupt context registers in the
     * associated NVP
     */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    /* Automatically restore thread context registers */
    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE &&
        do_restore) {
        xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
    }

    ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
    if (ipb) {
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
    }
    /*
     * Always call xive_tctx_ipb_update(). Even if no escalation was
     * triggered, there could be a pending interrupt which was saved
     * when the context was pulled and that we need to take into
     * account by recalculating the PIPR (which is not saved/restored).
     * It will also raise the External interrupt signal if needed.
     */
    xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
}

/*
 * Updating the OS CAM line can trigger a resend of interrupts
 */
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam;
    uint32_t qw1w2;
    uint64_t qw1dw1;
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;
    bool do_restore;

    /* First update the thread context */
    switch (size) {
    case 4:
        cam = value;
        qw1w2 = cpu_to_be32(cam);
        memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
        break;
    case 8:
        cam = value >> 32;
        qw1dw1 = cpu_to_be64(value);
        memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8);
        break;
    default:
        g_assert_not_reached();
    }

    xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);

    /* Check the interrupt pending bits */
    if (vo) {
        xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx,
                               do_restore);
    }
}
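
/*
 * Note on the store sizes above (illustrative): a 4-byte store writes
 * the CAM word (WORD2) only, while an 8-byte store writes the whole
 * QW1 doubleword and the CAM word is taken from its upper 32 bits. In
 * both cases, a CAM word with the valid bit (TM2_W2_VALID) set makes
 * the push check the NVP for pending interrupts to resend.
 */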

static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target)
{
    uint8_t *regs = &tctx->regs[ring];

    regs[TM_T] = target;
}

void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
                            hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tctx_set_target(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */

int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                         Xive2Eas *eas)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

static
int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
}

static
int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
}

int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                         Xive2End *end)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                           Xive2End *end, uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                         Xive2Nvp *nvp)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp);
}

int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                           Xive2Nvp *nvp, uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
}

int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd,
                          uint8_t nvgc_blk, uint32_t nvgc_idx,
                          Xive2Nvgc *nvgc)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
}

int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
                            uint8_t nvgc_blk, uint32_t nvgc_idx,
                            Xive2Nvgc *nvgc)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
}

/*
 * The thread context register words are in big-endian format.
 */
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                               uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam =   xive2_nvp_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
            cam == xive2_tctx_hw_cam_line(xptr, tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
            cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}
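
/*
 * Note (illustrative): for F=0 the rings are tried in a fixed order of
 * decreasing privilege, PHYS then POOL then OS, so a thread whose PHYS
 * and OS CAM lines both match reports TM_QW3_HV_PHYS. F=1 additionally
 * requires a valid USER ring (VU) and a matching logical server id.
 */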

static void xive2_router_realize(DeviceState *dev, Error **errp)
{
    Xive2Router *xrtr = XIVE2_ROUTER(dev);

    assert(xrtr->xfb);
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 */
static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
                                       uint32_t end_idx, Xive2End *end,
                                       uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}
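
/*
 * For reference, the generic ESB trigger state machine applied above
 * (as modelled by xive_esb_trigger() in hw/intc/xive.c): PQ=00 goes to
 * 10 and notifies; PQ=10 goes to 11 (queued, coalesced) without
 * notifying; PQ=11 stays 11 without notifying; PQ=01 (off) is left
 * unchanged and never notifies.
 */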

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message carries the same parameters as the function below.
 */
static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
                                    uint32_t end_idx, uint32_t end_data)
{
    Xive2End end;
    uint8_t priority;
    uint8_t format;
    bool found;
    Xive2Nvp nvp;
    uint8_t nvp_blk;
    uint32_t nvp_idx;

    /* END cache lookup */
    if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive2_end_is_enqueue(&end)) {
        xive2_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive2_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification:
     *
     *   F=0 : single or multiple NVP notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive2_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
    nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);

    /* NVP cache lookup */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
                      nvp_blk, nvp_idx);
        return;
    }

    found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
                          xive2_end_is_ignore(&end),
                          priority,
                          xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVP is dispatched on a HW thread:
     * - specific VP: update the NVP structure if backlog is activated
     * - logical server: forward request to IVPE (not supported)
     */
    if (xive2_end_is_backlog(&end)) {
        uint8_t ipb;

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }

        /*
         * Record the IPB in the associated NVP structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
        ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
            xive_priority_to_ipb(priority);
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive2_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive2_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of escalation notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field32(END2_W4_END_BLOCK,     end.w4),
                            xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
                            xive_get_field32(END2_W5_ESC_END_DATA,  end.w5));
}

void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    Xive2Eas eas;

    /* EAS cache lookup */
    if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        if (!notify) {
            return;
        }
    }

    if (!xive2_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn);
        return;
    }

    if (xive2_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field64(EAS2_END_BLOCK, eas.w),
                            xive_get_field64(EAS2_END_INDEX, eas.w),
                            xive_get_field64(EAS2_END_DATA,  eas.w));
}

static const Property xive2_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
};

static void xive2_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc    = "XIVE2 Router Engine";
    device_class_set_props(dc, xive2_router_properties);
    /* Parent is SysBusDeviceClass. No need to call its realize hook */
    dc->realize = xive2_router_realize;
    xnc->notify = xive2_router_notify;
}

static const TypeInfo xive2_router_info = {
    .name          = TYPE_XIVE2_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .instance_size = sizeof(Xive2Router),
    .class_size    = sizeof(Xive2RouterClass),
    .class_init    = xive2_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};

static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

static void xive2_end_source_write(void *opaque, hwaddr addr,
                                   uint64_t value, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    bool notify = false;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_esb_trigger(&pq);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        /* TODO: can we check StoreEOI availability from the router ? */
        notify = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        if (end_esmask == END2_W1_ESe) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x cannot EQ inject on ESe\n",
                          end_blk, end_idx);
            return;
        }
        notify = true;
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n",
                      offset);
        return;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    /* TODO: Forward the source event notification for routing */
    if (notify) {
        ;
    }
}

static const MemoryRegionOps xive2_end_source_ops = {
    .read = xive2_end_source_read,
    .write = xive2_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void xive2_end_source_realize(DeviceState *dev, Error **errp)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
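    /*
     * Illustrative layout: with 64K ESB pages (esb_shift = 16), END i
     * owns the 128KB range starting at i * 0x20000; the even page at
     * that base drives ESn and the odd page at base + 0x10000 drives
     * ESe, matching "end_idx = addr >> (esb_shift + 1)" and
     * addr_is_even() in the MMIO handlers above.
     */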
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive2_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static const Property xive2_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
                     Xive2Router *),
};

static void xive2_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc    = "XIVE END Source";
    device_class_set_props(dc, xive2_end_source_properties);
    dc->realize = xive2_end_source_realize;
    dc->user_creatable = false;
}

static const TypeInfo xive2_end_source_info = {
    .name          = TYPE_XIVE2_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(Xive2EndSource),
    .class_init    = xive2_end_source_class_init,
};

static void xive2_register_types(void)
{
    type_register_static(&xive2_router_info);
    type_register_static(&xive2_end_source_info);
}

type_init(xive2_register_types)