xref: /openbmc/qemu/hw/intc/xive2.c (revision 3abbec04e627396c32f2b7b7461961fb68c5c122)
1 /*
2  * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
3  *
4  * Copyright (c) 2019-2024, IBM Corporation.
5  *
6  * SPDX-License-Identifier: GPL-2.0-or-later
7  */
8 
9 #include "qemu/osdep.h"
10 #include "qemu/log.h"
11 #include "qemu/module.h"
12 #include "qapi/error.h"
13 #include "target/ppc/cpu.h"
14 #include "system/cpus.h"
15 #include "system/dma.h"
16 #include "hw/qdev-properties.h"
17 #include "hw/ppc/xive.h"
18 #include "hw/ppc/xive2.h"
19 #include "hw/ppc/xive2_regs.h"
20 #include "trace.h"
21 
22 uint32_t xive2_router_get_config(Xive2Router *xrtr)
23 {
24     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
25 
26     return xrc->get_config(xrtr);
27 }
28 
29 static int xive2_router_get_block_id(Xive2Router *xrtr)
30 {
31     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
32 
33     return xrc->get_block_id(xrtr);
34 }
35 
36 static uint64_t xive2_nvp_reporting_addr(Xive2Nvp *nvp)
37 {
38     uint64_t cache_addr;
39 
40     cache_addr = xive_get_field32(NVP2_W6_REPORTING_LINE, nvp->w6) << 24 |
41         xive_get_field32(NVP2_W7_REPORTING_LINE, nvp->w7);
42     cache_addr <<= 8; /* aligned on a cache line pair */
43     return cache_addr;
44 }
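
/*
 * Worked example: the reporting line number is rebuilt from W6 (upper
 * bits) and W7, then shifted left by 8, so the resulting address is
 * always 256-byte aligned, i.e. a pair of 128-byte cache lines. The
 * odd line of the pair receives the pulled context (see
 * xive2_tm_pull_ctx_ol() below, which adds 0x80 to this address).
 */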
45 
46 static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
47 {
48     uint32_t val = 0;
49     uint8_t *ptr, i;
50 
51     if (priority > 7) {
52         return 0;
53     }
54 
55     /*
56      * The per-priority backlog counters are 24-bit and stored in big
57      * endian. The NVGC structure is 32 bytes long, so the 24 bytes
58      * starting at w2 hold the 8 counters, 24 bits per priority.
59      */
60     ptr = (uint8_t *)&nvgc->w2 + priority * 3;
61     for (i = 0; i < 3; i++, ptr++) {
62         val = (val << 8) + *ptr;
63     }
64     return val;
65 }
66 
67 static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority,
68                                    uint32_t val)
69 {
70     uint8_t *ptr, i;
71     uint32_t shift;
72 
73     if (priority > 7) {
74         return;
75     }
76 
77     if (val > 0xFFFFFF) {
78         val = 0xFFFFFF;
79     }
80     /*
81      * The per-priority backlog counters are 24-bit and the structure
82      * is stored in big endian
83      */
84     ptr = (uint8_t *)&nvgc->w2 + priority * 3;
85     for (i = 0; i < 3; i++, ptr++) {
86         shift = 8 * (2 - i);
87         *ptr = (val >> shift) & 0xFF;
88     }
89 }
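
/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * model): the two accessors above round-trip a counter value and
 * saturate stores at the 24-bit maximum, independently of host
 * endianness since they work byte by byte.
 */
static inline void xive2_nvgc_backlog_example(void)
{
    Xive2Nvgc nvgc = { 0 };

    xive2_nvgc_set_backlog(&nvgc, 2, 0x123456);
    g_assert(xive2_nvgc_get_backlog(&nvgc, 2) == 0x123456);

    /* Values larger than 24 bits are clamped on store */
    xive2_nvgc_set_backlog(&nvgc, 2, 0x1000000);
    g_assert(xive2_nvgc_get_backlog(&nvgc, 2) == 0xFFFFFF);
}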
90 
91 uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr,
92                                          bool crowd,
93                                          uint8_t blk, uint32_t idx,
94                                          uint16_t offset, uint16_t val)
95 {
96     Xive2Router *xrtr = XIVE2_ROUTER(xptr);
97     uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset);
98     uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset);
99     Xive2Nvgc nvgc;
100     uint32_t count, old_count;
101 
102     if (xive2_router_get_nvgc(xrtr, crowd, blk, idx, &nvgc)) {
103         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No %s %x/%x\n",
104                       crowd ? "NVC" : "NVG", blk, idx);
105         return -1;
106     }
107     if (!xive2_nvgc_is_valid(&nvgc)) {
108         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", blk, idx);
109         return -1;
110     }
111 
112     old_count = xive2_nvgc_get_backlog(&nvgc, priority);
113     count = old_count;
114     /*
115      * op:
116      * 0b00 => increment
117      * 0b01 => decrement
118      * 0b1- => read
119      */
120     if (op == 0b00 || op == 0b01) {
121         if (op == 0b00) {
122             count += val;
123         } else {
124             if (count > val) {
125                 count -= val;
126             } else {
127                 count = 0;
128             }
129         }
130         xive2_nvgc_set_backlog(&nvgc, priority, count);
131         xive2_router_write_nvgc(xrtr, crowd, blk, idx, &nvgc);
132     }
133     trace_xive_nvgc_backlog_op(crowd, blk, idx, op, priority, old_count);
134     return old_count;
135 }
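
/*
 * Hedged usage sketch: the "offset" argument packs the operation and
 * the priority using the NVx_BACKLOG_* fields. Decrementing the
 * priority-5 counter of an NVG by one could look like (illustrative
 * only):
 *
 *   uint16_t off = SETFIELD(NVx_BACKLOG_OP, 0, 0b01) |
 *                  SETFIELD(NVx_BACKLOG_PRIO, 0, 5);
 *   xive2_presenter_nvgc_backlog_op(xptr, false, blk, idx, off, 1);
 */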
136 
137 uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr,
138                                         uint8_t blk, uint32_t idx,
139                                         uint16_t offset)
140 {
141     Xive2Router *xrtr = XIVE2_ROUTER(xptr);
142     uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset);
143     uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset);
144     Xive2Nvp nvp;
145     uint8_t ipb, old_ipb, rc;
146 
147     if (xive2_router_get_nvp(xrtr, blk, idx, &nvp)) {
148         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", blk, idx);
149         return -1;
150     }
151     if (!xive2_nvp_is_valid(&nvp)) {
152         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVP %x/%x\n", blk, idx);
153         return -1;
154     }
155 
156     old_ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
157     ipb = old_ipb;
158     /*
159      * op:
160      * 0b00 => set priority bit
161      * 0b01 => reset priority bit
162      * 0b1- => read
163      */
164     if (op == 0b00 || op == 0b01) {
165         if (op == 0b00) {
166             ipb |= xive_priority_to_ipb(priority);
167         } else {
168             ipb &= ~xive_priority_to_ipb(priority);
169         }
170         nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
171         xive2_router_write_nvp(xrtr, blk, idx, &nvp, 2);
172     }
173     rc = !!(old_ipb & xive_priority_to_ipb(priority));
174     trace_xive_nvp_backlog_op(blk, idx, op, priority, rc);
175     return rc;
176 }
177 
178 void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf)
179 {
180     if (!xive2_eas_is_valid(eas)) {
181         return;
182     }
183 
184     g_string_append_printf(buf, "  %08x %s end:%02x/%04x data:%08x\n",
185                            lisn, xive2_eas_is_masked(eas) ? "M" : " ",
186                            (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
187                            (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
188                            (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
189 }
190 
191 #define XIVE2_QSIZE_CHUNK_CL    128
192 #define XIVE2_QSIZE_CHUNK_4k   4096
193 /* Calculate max number of queue entries for an END */
194 static uint32_t xive2_end_get_qentries(Xive2End *end)
195 {
196     uint32_t w3 = end->w3;
197     uint32_t qsize = xive_get_field32(END2_W3_QSIZE, w3);
198     if (xive_get_field32(END2_W3_CL, w3)) {
199         g_assert(qsize <= 4);
200         return (XIVE2_QSIZE_CHUNK_CL << qsize) / sizeof(uint32_t);
201     } else {
202         g_assert(qsize <= 12);
203         return (XIVE2_QSIZE_CHUNK_4k << qsize) / sizeof(uint32_t);
204     }
205 }
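
/*
 * Worked example: with the cache-line (CL) bit set and QSIZE = 0, the
 * queue is a single 128-byte cache line, i.e. 32 4-byte entries.
 * Without CL, QSIZE = 4 describes a 4096 << 4 = 64KB queue, i.e.
 * 16384 entries.
 */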
206 
207 void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, GString *buf)
208 {
209     uint64_t qaddr_base = xive2_end_qaddr(end);
210     uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
211     uint32_t qentries = xive2_end_get_qentries(end);
212     int i;
213 
214     /*
215      * Print out the [ (qindex - (width - 1)) .. qindex ] window
216      */
217     g_string_append_printf(buf, " [ ");
218     qindex = (qindex - (width - 1)) & (qentries - 1);
219     for (i = 0; i < width; i++) {
220         uint64_t qaddr = qaddr_base + (qindex << 2);
221         uint32_t qdata = -1;
222 
223         if (dma_memory_read(&address_space_memory, qaddr, &qdata,
224                             sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
225             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
226                           HWADDR_PRIx "\n", qaddr);
227             return;
228         }
229         g_string_append_printf(buf, "%s%08x ", i == width - 1 ? "^" : "",
230                                be32_to_cpu(qdata));
231         qindex = (qindex + 1) & (qentries - 1);
232     }
233     g_string_append_printf(buf, "]");
234 }
235 
236 void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
237 {
238     uint64_t qaddr_base = xive2_end_qaddr(end);
239     uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
240     uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
241     uint32_t qentries = xive2_end_get_qentries(end);
242 
243     uint32_t nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
244     uint32_t nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
245     uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
246     uint8_t pq;
247 
248     if (!xive2_end_is_valid(end)) {
249         return;
250     }
251 
252     pq = xive_get_field32(END2_W1_ESn, end->w1);
253 
254     g_string_append_printf(buf,
255                            "  %08x %c%c %c%c%c%c%c%c%c%c%c%c%c %c%c "
256                            "prio:%d nvp:%02x/%04x",
257                            end_idx,
258                            pq & XIVE_ESB_VAL_P ? 'P' : '-',
259                            pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
260                            xive2_end_is_valid(end)    ? 'v' : '-',
261                            xive2_end_is_enqueue(end)  ? 'q' : '-',
262                            xive2_end_is_notify(end)   ? 'n' : '-',
263                            xive2_end_is_backlog(end)  ? 'b' : '-',
264                            xive2_end_is_precluded_escalation(end) ? 'p' : '-',
265                            xive2_end_is_escalate(end) ? 'e' : '-',
266                            xive2_end_is_escalate_end(end) ? 'N' : '-',
267                            xive2_end_is_uncond_escalation(end)   ? 'u' : '-',
268                            xive2_end_is_silent_escalation(end)   ? 's' : '-',
269                            xive2_end_is_firmware1(end)   ? 'f' : '-',
270                            xive2_end_is_firmware2(end)   ? 'F' : '-',
271                            xive2_end_is_ignore(end) ? 'i' : '-',
272                            xive2_end_is_crowd(end)  ? 'c' : '-',
273                            priority, nvx_blk, nvx_idx);
274 
275     if (qaddr_base) {
276         g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d",
277                                qaddr_base, qindex, qentries, qgen);
278         xive2_end_queue_pic_print_info(end, 6, buf);
279     }
280     g_string_append_c(buf, '\n');
281 }
282 
283 void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
284                                   GString *buf)
285 {
286     Xive2Eas *eas = (Xive2Eas *) &end->w4;
287     uint8_t pq;
288 
289     if (!xive2_end_is_escalate(end)) {
290         return;
291     }
292 
293     pq = xive_get_field32(END2_W1_ESe, end->w1);
294 
295     g_string_append_printf(buf, "  %08x %c%c %c%c end:%02x/%04x data:%08x\n",
296                            end_idx,
297                            pq & XIVE_ESB_VAL_P ? 'P' : '-',
298                            pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
299                            xive2_eas_is_valid(eas) ? 'v' : ' ',
300                            xive2_eas_is_masked(eas) ? 'M' : ' ',
301                            (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
302                            (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
303                            (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
304 }
305 
306 void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
307 {
308     uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
309     uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
310     uint64_t cache_line = xive2_nvp_reporting_addr(nvp);
311 
312     if (!xive2_nvp_is_valid(nvp)) {
313         return;
314     }
315 
316     g_string_append_printf(buf, "  %08x end:%02x/%04x IPB:%02x PGoFirst:%02x",
317                            nvp_idx, eq_blk, eq_idx,
318                            xive_get_field32(NVP2_W2_IPB, nvp->w2),
319                            xive_get_field32(NVP2_W0_PGOFIRST, nvp->w0));
320     if (cache_line) {
321         g_string_append_printf(buf, "  reporting CL:%016"PRIx64, cache_line);
322     }
323 
324     /*
325      * When the NVP is HW controlled, more fields are updated
326      */
327     if (xive2_nvp_is_hw(nvp)) {
328         g_string_append_printf(buf, " CPPR:%02x",
329                                xive_get_field32(NVP2_W2_CPPR, nvp->w2));
330         if (xive2_nvp_is_co(nvp)) {
331             g_string_append_printf(buf, " CO:%04x",
332                                    xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
333         }
334     }
335     g_string_append_c(buf, '\n');
336 }
337 
338 void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf)
339 {
340     uint8_t i;
341 
342     if (!xive2_nvgc_is_valid(nvgc)) {
343         return;
344     }
345 
346     g_string_append_printf(buf, "  %08x PGoNext:%02x bklog: ", nvgc_idx,
347                            xive_get_field32(NVGC2_W0_PGONEXT, nvgc->w0));
348     for (i = 0; i <= XIVE_PRIORITY_MAX; i++) {
349         g_string_append_printf(buf, "[%d]=0x%x ",
350                                i, xive2_nvgc_get_backlog(nvgc, i));
351     }
352     g_string_append_printf(buf, "\n");
353 }
354 
355 static void xive2_end_enqueue(Xive2End *end, uint32_t data)
356 {
357     uint64_t qaddr_base = xive2_end_qaddr(end);
358     uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
359     uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
360 
361     uint64_t qaddr = qaddr_base + (qindex << 2);
362     uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
363     uint32_t qentries = xive2_end_get_qentries(end);
364 
365     if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
366                          MEMTXATTRS_UNSPECIFIED)) {
367         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
368                       HWADDR_PRIx "\n", qaddr);
369         return;
370     }
371 
372     qindex = (qindex + 1) & (qentries - 1);
373     if (qindex == 0) {
374         qgen ^= 1;
375         end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);
376 
377         /* Set gen flipped to 1, it gets reset on a cache watch operation */
378         end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, 1);
379     }
380     end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
381 }
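
/*
 * Worked example: with a 4-entry queue, enqueueing at index 3 wraps
 * the index back to 0 and flips the generation bit. Since bit 31 of
 * each queue word carries the generation, the consumer can tell
 * fresh entries from stale ones after the wrap.
 */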
382 
383 static void xive2_pgofnext(uint8_t *nvgc_blk, uint32_t *nvgc_idx,
384                            uint8_t next_level)
385 {
386     uint32_t mask, next_idx;
387     uint8_t next_blk;
388 
389     /*
390      * Adjust the block and index of a VP for the next group/crowd
391      * size (PGofFirst/PGofNext field in the NVP and NVGC structures).
392      *
393      * The 6-bit group level is split into a 2-bit crowd level and a
394      * 4-bit group level, with a similar encoding. However, we don't
395      * support a crowd size of 8, so a crowd level of 0b11 is bumped
396      * to a crowd size of 16.
397      */
398     next_blk = NVx_CROWD_LVL(next_level);
399     if (next_blk == 3) {
400         next_blk = 4;
401     }
402     mask = (1 << next_blk) - 1;
403     *nvgc_blk &= ~mask;
404     *nvgc_blk |= mask >> 1;
405 
406     next_idx = NVx_GROUP_LVL(next_level);
407     mask = (1 << next_idx) - 1;
408     *nvgc_idx &= ~mask;
409     *nvgc_idx |= mask >> 1;
410 }
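
/*
 * Worked example: a crowd level of 0b10 (crowd of 4) gives a block
 * mask of 0b11, so blocks 4..7 all resolve to NVGC block 0b0101 = 5.
 * Likewise, a group level of 4 masks the low 4 index bits, so VP
 * indexes 0x20..0x2f all resolve to the NVGC at index 0x27.
 */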
411 
412 /*
413  * Scan the group chain and return the highest priority and group
414  * level of pending group interrupts.
415  */
416 static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr,
417                                             uint8_t nvx_blk, uint32_t nvx_idx,
418                                             uint8_t first_group,
419                                             uint8_t *out_level)
420 {
421     Xive2Router *xrtr = XIVE2_ROUTER(xptr);
422     uint32_t nvgc_idx;
423     uint32_t current_level, count;
424     uint8_t nvgc_blk, prio;
425     Xive2Nvgc nvgc;
426 
427     for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) {
428         current_level = first_group & 0x3F;
429         nvgc_blk = nvx_blk;
430         nvgc_idx = nvx_idx;
431 
432         while (current_level) {
433             xive2_pgofnext(&nvgc_blk, &nvgc_idx, current_level);
434 
435             if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(current_level),
436                                       nvgc_blk, nvgc_idx, &nvgc)) {
437                 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n",
438                               nvgc_blk, nvgc_idx);
439                 return 0xFF;
440             }
441             if (!xive2_nvgc_is_valid(&nvgc)) {
442                 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n",
443                               nvgc_blk, nvgc_idx);
444                 return 0xFF;
445             }
446 
447             count = xive2_nvgc_get_backlog(&nvgc, prio);
448             if (count) {
449                 *out_level = current_level;
450                 return prio;
451             }
452             current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0x3F;
453         }
454     }
455     return 0xFF;
456 }
457 
458 static void xive2_presenter_backlog_decr(XivePresenter *xptr,
459                                          uint8_t nvx_blk, uint32_t nvx_idx,
460                                          uint8_t group_prio,
461                                          uint8_t group_level)
462 {
463     Xive2Router *xrtr = XIVE2_ROUTER(xptr);
464     uint32_t nvgc_idx, count;
465     uint8_t nvgc_blk;
466     Xive2Nvgc nvgc;
467 
468     nvgc_blk = nvx_blk;
469     nvgc_idx = nvx_idx;
470     xive2_pgofnext(&nvgc_blk, &nvgc_idx, group_level);
471 
472     if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(group_level),
473                               nvgc_blk, nvgc_idx, &nvgc)) {
474         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n",
475                       nvgc_blk, nvgc_idx);
476         return;
477     }
478     if (!xive2_nvgc_is_valid(&nvgc)) {
479         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n",
480                       nvgc_blk, nvgc_idx);
481         return;
482     }
483     count = xive2_nvgc_get_backlog(&nvgc, group_prio);
484     if (!count) {
485         return;
486     }
487     xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1);
488     xive2_router_write_nvgc(xrtr, NVx_CROWD_LVL(group_level),
489                             nvgc_blk, nvgc_idx, &nvgc);
490 }
491 
492 /*
493  * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
494  *
495  * TIMA Gen2 VP “save & restore” (S&R) indicated by H bit next to V bit
496  *
497  *   - if a context is enabled with the H bit set, the VP context
498  *     information is retrieved from the NVP structure (“check out”)
499  *     and stored back on a context pull (“check in”), the SW receives
500  *     the same context pull information as on P9
501  *
502  *   - the H bit cannot be changed while the V bit is set, i.e. a
503  *     context cannot be set up in the TIMA and then be “pushed” into
504  *     the NVP by changing the H bit while the context is enabled
505  */
506 
507 static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
508                                 uint8_t nvp_blk, uint32_t nvp_idx,
509                                 uint8_t ring)
510 {
511     CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
512     uint32_t pir = env->spr_cb[SPR_PIR].default_value;
513     Xive2Nvp nvp;
514     uint8_t *regs = &tctx->regs[ring];
515 
516     if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
517         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
518                           nvp_blk, nvp_idx);
519         return;
520     }
521 
522     if (!xive2_nvp_is_valid(&nvp)) {
523         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
524                       nvp_blk, nvp_idx);
525         return;
526     }
527 
528     if (!xive2_nvp_is_hw(&nvp)) {
529         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
530                       nvp_blk, nvp_idx);
531         return;
532     }
533 
534     if (!xive2_nvp_is_co(&nvp)) {
535         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checkout\n",
536                       nvp_blk, nvp_idx);
537         return;
538     }
539 
540     if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) &&
541         xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) {
542         qemu_log_mask(LOG_GUEST_ERROR,
543                       "XIVE: NVP %x/%x invalid checkout Thread %x\n",
544                       nvp_blk, nvp_idx, pir);
545         return;
546     }
547 
548     nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
549     nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
550     if (nvp.w0 & NVP2_W0_L) {
551         /*
552          * Typically not used. If LSMFB is restored with 0, it will
553          * force a backlog rescan
554          */
555         nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
556     }
557     if (nvp.w0 & NVP2_W0_G) {
558         nvp.w2 = xive_set_field32(NVP2_W2_LGS, nvp.w2, regs[TM_LGS]);
559     }
560     if (nvp.w0 & NVP2_W0_T) {
561         nvp.w2 = xive_set_field32(NVP2_W2_T, nvp.w2, regs[TM_T]);
562     }
563     xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
564 
565     nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
566     /* NVP2_W1_CO_THRID_VALID is only set once */
567     nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF);
568     xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
569 }
570 
571 static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk,
572                              uint32_t *nvp_idx, bool *valid, bool *hw)
573 {
574     *nvp_blk = xive2_nvp_blk(cam);
575     *nvp_idx = xive2_nvp_idx(cam);
576     *valid = !!(cam & TM2_W2_VALID);
577     *hw = !!(cam & TM2_W2_HW);
578 }
579 
580 /*
581  * Encode the HW CAM line with 7bit or 8bit thread id. The thread id
582  * width and block id width is configurable at the IC level.
583  *
584  *    chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
585  *    chipid << 24 | 0000 0000 0000 0001 threadid   (8Bit)
586  */
587 static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
588 {
589     Xive2Router *xrtr = XIVE2_ROUTER(xptr);
590     CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
591     uint32_t pir = env->spr_cb[SPR_PIR].default_value;
592     uint8_t blk = xive2_router_get_block_id(xrtr);
593     uint8_t tid_shift =
594         xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
595     uint8_t tid_mask = (1 << tid_shift) - 1;
596 
597     return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
598 }
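
/*
 * Worked example (7-bit thread ids): PIR 0x23 yields an index of
 * 1 << 7 | 0x23 = 0xa3, i.e. the marker bit followed by the 7-bit
 * thread id, combined with the router's block id as shown in the
 * first layout above.
 */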
599 
600 static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx,
601                                   hwaddr offset, unsigned size, uint8_t ring)
602 {
603     Xive2Router *xrtr = XIVE2_ROUTER(xptr);
604     uint32_t target_ringw2 = xive_tctx_word2(&tctx->regs[ring]);
605     uint32_t cam = be32_to_cpu(target_ringw2);
606     uint8_t nvp_blk;
607     uint32_t nvp_idx;
608     uint8_t cur_ring;
609     bool valid;
610     bool do_save;
611 
612     xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save);
613 
614     if (!valid) {
615         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
616                       nvp_blk, nvp_idx);
617     }
618 
619     /* Invalidate CAM line of requested ring and all lower rings */
620     for (cur_ring = TM_QW0_USER; cur_ring <= ring;
621          cur_ring += XIVE_TM_RING_SIZE) {
622         uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]);
623         uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0);
624         memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4);
625     }
626 
627     if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
628         xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring);
629     }
630 
631     /*
632      * Lower the external interrupt line of the requested ring and all
633      * lower rings, except USER, which doesn't have one.
634      */
635     for (cur_ring = TM_QW1_OS; cur_ring <= ring;
636          cur_ring += XIVE_TM_RING_SIZE) {
637         xive_tctx_reset_signal(tctx, cur_ring);
638     }
639     return target_ringw2;
640 }
641 
642 uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
643                               hwaddr offset, unsigned size)
644 {
645     return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS);
646 }
647 
648 #define REPORT_LINE_GEN1_SIZE       16
649 
650 static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data,
651                                       uint8_t size)
652 {
653     uint8_t *regs = tctx->regs;
654 
655     g_assert(size == REPORT_LINE_GEN1_SIZE);
656     memset(data, 0, size);
657     /*
658      * See xive architecture for description of what is saved. It is
659      * hand-picked information to fit in 16 bytes.
660      */
661     data[0x0] = regs[TM_QW3_HV_PHYS + TM_NSR];
662     data[0x1] = regs[TM_QW3_HV_PHYS + TM_CPPR];
663     data[0x2] = regs[TM_QW3_HV_PHYS + TM_IPB];
664     data[0x3] = regs[TM_QW2_HV_POOL + TM_IPB];
665     data[0x4] = regs[TM_QW1_OS + TM_ACK_CNT];
666     data[0x5] = regs[TM_QW3_HV_PHYS + TM_LGS];
667     data[0x6] = 0xFF;
668     data[0x7] = regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x80;
669     data[0x7] |= (regs[TM_QW2_HV_POOL + TM_WORD2] & 0x80) >> 1;
670     data[0x7] |= (regs[TM_QW1_OS + TM_WORD2] & 0x80) >> 2;
671     data[0x7] |= (regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x3);
672     data[0x8] = regs[TM_QW1_OS + TM_NSR];
673     data[0x9] = regs[TM_QW1_OS + TM_CPPR];
674     data[0xA] = regs[TM_QW1_OS + TM_IPB];
675     data[0xB] = regs[TM_QW1_OS + TM_LGS];
676     if (regs[TM_QW0_USER + TM_WORD2] & 0x80) {
677         /*
678          * Logical server extension, except VU bit replaced by EB bit
679          * from NSR
680          */
681         data[0xC] = regs[TM_QW0_USER + TM_WORD2];
682         data[0xC] &= ~0x80;
683         data[0xC] |= regs[TM_QW0_USER + TM_NSR] & 0x80;
684         data[0xD] = regs[TM_QW0_USER + TM_WORD2 + 1];
685         data[0xE] = regs[TM_QW0_USER + TM_WORD2 + 2];
686         data[0xF] = regs[TM_QW0_USER + TM_WORD2 + 3];
687     }
688 }
689 
690 static void xive2_tm_pull_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
691                                  hwaddr offset, uint64_t value,
692                                  unsigned size, uint8_t ring)
693 {
694     Xive2Router *xrtr = XIVE2_ROUTER(xptr);
695     uint32_t hw_cam, nvp_idx, xive2_cfg, reserved;
696     uint8_t nvp_blk;
697     Xive2Nvp nvp;
698     uint64_t phys_addr;
699     MemTxResult result;
700 
701     hw_cam = xive2_tctx_hw_cam_line(xptr, tctx);
702     nvp_blk = xive2_nvp_blk(hw_cam);
703     nvp_idx = xive2_nvp_idx(hw_cam);
704 
705     if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
706         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
707                       nvp_blk, nvp_idx);
708         return;
709     }
710 
711     if (!xive2_nvp_is_valid(&nvp)) {
712         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
713                       nvp_blk, nvp_idx);
714         return;
715     }
716 
717     xive2_cfg = xive2_router_get_config(xrtr);
718 
719     phys_addr = xive2_nvp_reporting_addr(&nvp) + 0x80; /* odd line */
720     if (xive2_cfg & XIVE2_GEN1_TIMA_OS) {
721         uint8_t pull_ctxt[REPORT_LINE_GEN1_SIZE];
722 
723         xive2_tm_report_line_gen1(tctx, pull_ctxt, REPORT_LINE_GEN1_SIZE);
724         result = dma_memory_write(&address_space_memory, phys_addr,
725                                   pull_ctxt, REPORT_LINE_GEN1_SIZE,
726                                   MEMTXATTRS_UNSPECIFIED);
727         assert(result == MEMTX_OK);
728     } else {
729         result = dma_memory_write(&address_space_memory, phys_addr,
730                                   &tctx->regs, sizeof(tctx->regs),
731                                   MEMTXATTRS_UNSPECIFIED);
732         assert(result == MEMTX_OK);
733         reserved = 0xFFFFFFFF;
734         result = dma_memory_write(&address_space_memory, phys_addr + 12,
735                                   &reserved, sizeof(reserved),
736                                   MEMTXATTRS_UNSPECIFIED);
737         assert(result == MEMTX_OK);
738     }
739 
740     /* The rest is similar to pulling the context into the registers */
741     xive2_tm_pull_ctx(xptr, tctx, offset, size, ring);
742 }
743 
744 void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
745                              hwaddr offset, uint64_t value, unsigned size)
746 {
747     xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW1_OS);
748 }
749 
751 void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
752                                hwaddr offset, uint64_t value, unsigned size)
753 {
754     xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS);
755 }
756 
757 static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
758                                         uint8_t nvp_blk, uint32_t nvp_idx,
759                                         Xive2Nvp *nvp)
760 {
761     CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
762     uint32_t pir = env->spr_cb[SPR_PIR].default_value;
763     uint8_t cppr;
764 
765     if (!xive2_nvp_is_hw(nvp)) {
766         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
767                       nvp_blk, nvp_idx);
768         return 0;
769     }
770 
771     cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2);
772     nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0);
773     xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);
774 
775     tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
776     tctx->regs[TM_QW1_OS + TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2);
777     tctx->regs[TM_QW1_OS + TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2);
778     tctx->regs[TM_QW1_OS + TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2);
779 
780     nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
781     nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
782     nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir);
783 
784     /*
785      * Checkout privilege: 0:OS, 1:Pool, 2:Hard
786      *
787      * TODO: we only support OS push/pull
788      */
789     nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0);
790 
791     xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1);
792 
793     /* return restored CPPR to generate a CPU exception if needed */
794     return cppr;
795 }
796 
797 static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
798                                    uint8_t nvp_blk, uint32_t nvp_idx,
799                                    bool do_restore)
800 {
801     XivePresenter *xptr = XIVE_PRESENTER(xrtr);
802     uint8_t ipb;
803     uint8_t backlog_level;
804     uint8_t group_level;
805     uint8_t first_group;
806     uint8_t backlog_prio;
807     uint8_t group_prio;
808     uint8_t *regs = &tctx->regs[TM_QW1_OS];
809     Xive2Nvp nvp;
810 
811     /*
812      * Grab the thread interrupt context registers from the
813      * associated NVP
814      */
815     if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
816         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
817                       nvp_blk, nvp_idx);
818         return;
819     }
820 
821     if (!xive2_nvp_is_valid(&nvp)) {
822         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
823                       nvp_blk, nvp_idx);
824         return;
825     }
826 
827     /* Automatically restore thread context registers */
828     if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE &&
829         do_restore) {
830         xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
831     }
832 
833     ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
834     if (ipb) {
835         nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
836         xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
837     }
838     /* IPB bits in the backlog are merged with the TIMA IPB bits */
839     regs[TM_IPB] |= ipb;
840     backlog_prio = xive_ipb_to_pipr(regs[TM_IPB]);
841     backlog_level = 0;
842 
843     first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0);
844     if (first_group && regs[TM_LSMFB] < backlog_prio) {
845         group_prio = xive2_presenter_backlog_scan(xptr, nvp_blk, nvp_idx,
846                                                   first_group, &group_level);
847         regs[TM_LSMFB] = group_prio;
848         if (regs[TM_LGS] && group_prio < backlog_prio &&
849             group_prio < regs[TM_CPPR]) {
850 
851             /* VP can take a group interrupt */
852             xive2_presenter_backlog_decr(xptr, nvp_blk, nvp_idx,
853                                          group_prio, group_level);
854             backlog_prio = group_prio;
855             backlog_level = group_level;
856         }
857     }
858 
859     /*
860      * Compute the PIPR based on the restored state.
861      * It will raise the External interrupt signal if needed.
862      */
863     xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level);
864 }
865 
866 /*
867  * Updating the OS CAM line can trigger a resend of the interrupt
868  */
869 void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
870                           hwaddr offset, uint64_t value, unsigned size)
871 {
872     uint32_t cam;
873     uint32_t qw1w2;
874     uint64_t qw1dw1;
875     uint8_t nvp_blk;
876     uint32_t nvp_idx;
877     bool vo;
878     bool do_restore;
879 
880     /* First update the thread context */
881     switch (size) {
882     case 4:
883         cam = value;
884         qw1w2 = cpu_to_be32(cam);
885         memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
886         break;
887     case 8:
888         cam = value >> 32;
889         qw1dw1 = cpu_to_be64(value);
890         memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8);
891         break;
892     default:
893         g_assert_not_reached();
894     }
895 
896     xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);
897 
898     /* Check the interrupt pending bits */
899     if (vo) {
900         xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx,
901                                do_restore);
902     }
903 }
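
/*
 * Hedged usage sketch: software enables an OS context with a 4-byte
 * MMIO store of the CAM word, or an 8-byte store covering words 2 and
 * 3, at TM_QW1_OS + TM_WORD2. When the valid (VO) bit is set, the
 * IPB bits that accumulated in the NVP while the VP was not
 * dispatched are merged back and may raise the OS interrupt line.
 */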
904 
905 static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring,
906                                       uint32_t *nvp_blk, uint32_t *nvp_idx)
907 {
908     uint32_t w2, cam;
909 
910     w2 = xive_tctx_word2(&tctx->regs[ring]);
911     switch (ring) {
912     case TM_QW1_OS:
913         if (!(be32_to_cpu(w2) & TM2_QW1W2_VO)) {
914             return -1;
915         }
916         cam = xive_get_field32(TM2_QW1W2_OS_CAM, w2);
917         break;
918     case TM_QW2_HV_POOL:
919         if (!(be32_to_cpu(w2) & TM2_QW2W2_VP)) {
920             return -1;
921         }
922         cam = xive_get_field32(TM2_QW2W2_POOL_CAM, w2);
923         break;
924     case TM_QW3_HV_PHYS:
925         if (!(be32_to_cpu(w2) & TM2_QW3W2_VT)) {
926             return -1;
927         }
928         cam = xive2_tctx_hw_cam_line(tctx->xptr, tctx);
929         break;
930     default:
931         return -1;
932     }
933     *nvp_blk = xive2_nvp_blk(cam);
934     *nvp_idx = xive2_nvp_idx(cam);
935     return 0;
936 }
937 
938 static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
939 {
940     uint8_t *regs = &tctx->regs[ring];
941     Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr);
942     uint8_t old_cppr, backlog_prio, first_group, group_level = 0;
943     uint8_t pipr_min, lsmfb_min, ring_min;
944     bool group_enabled;
945     uint32_t nvp_blk, nvp_idx;
946     Xive2Nvp nvp;
947     int rc;
948 
949     trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
950                              regs[TM_IPB], regs[TM_PIPR],
951                              cppr, regs[TM_NSR]);
952 
953     if (cppr > XIVE_PRIORITY_MAX) {
954         cppr = 0xff;
955     }
956 
957     old_cppr = regs[TM_CPPR];
958     regs[TM_CPPR] = cppr;
959 
960     /*
961      * Recompute the PIPR based on local pending interrupts. It will
962      * be adjusted below if needed in case of pending group interrupts.
963      */
964     pipr_min = xive_ipb_to_pipr(regs[TM_IPB]);
965     group_enabled = !!regs[TM_LGS];
966     lsmfb_min = (group_enabled) ? regs[TM_LSMFB] : 0xff;
967     ring_min = ring;
968 
969     /* PHYS updates also depend on POOL values */
970     if (ring == TM_QW3_HV_PHYS) {
971         uint8_t *pregs = &tctx->regs[TM_QW2_HV_POOL];
972 
973         /* POOL values only matter if POOL ctx is valid */
974         if (pregs[TM_WORD2] & 0x80) {
975 
976             uint8_t pool_pipr = xive_ipb_to_pipr(pregs[TM_IPB]);
977             uint8_t pool_lsmfb = pregs[TM_LSMFB];
978 
979             /*
980              * Determine highest priority interrupt and
981              * remember which ring has it.
982              */
983             if (pool_pipr < pipr_min) {
984                 pipr_min = pool_pipr;
985                 if (pool_pipr < lsmfb_min) {
986                     ring_min = TM_QW2_HV_POOL;
987                 }
988             }
989 
990             /* Values needed for group priority calculation */
991             if (pregs[TM_LGS] && (pool_lsmfb < lsmfb_min)) {
992                 group_enabled = true;
993                 lsmfb_min = pool_lsmfb;
994                 if (lsmfb_min < pipr_min) {
995                     ring_min = TM_QW2_HV_POOL;
996                 }
997             }
998         }
999     }
1000 
1001     /* PIPR should not be set to a value greater than CPPR */
1002     regs[TM_PIPR] = (pipr_min > cppr) ? cppr : pipr_min;
1003 
1004     rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx);
1005     if (rc) {
1006         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid context\n");
1007         return;
1008     }
1009 
1010     if (cppr < old_cppr) {
1011         /*
1012          * FIXME: check if there's a group interrupt being presented
1013          * and if the new cppr prevents it. If so, then the group
1014          * interrupt needs to be re-added to the backlog and
1015          * re-triggered (see re-trigger END info in the NVGC
1016          * structure)
1017          */
1018     }
1019 
1020     if (group_enabled &&
1021         lsmfb_min < cppr &&
1022         lsmfb_min < regs[TM_PIPR]) {
1023         /*
1024          * Thread has seen a group interrupt with a higher priority
1025          * than the new cppr or pending local interrupt. Check the
1026          * backlog
1027          */
1028         if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
1029             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
1030                           nvp_blk, nvp_idx);
1031             return;
1032         }
1033 
1034         if (!xive2_nvp_is_valid(&nvp)) {
1035             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
1036                           nvp_blk, nvp_idx);
1037             return;
1038         }
1039 
1040         first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0);
1041         if (!first_group) {
1042             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
1043                           nvp_blk, nvp_idx);
1044             return;
1045         }
1046 
1047         backlog_prio = xive2_presenter_backlog_scan(tctx->xptr,
1048                                                     nvp_blk, nvp_idx,
1049                                                     first_group, &group_level);
1050         tctx->regs[ring_min + TM_LSMFB] = backlog_prio;
1051         if (backlog_prio != 0xFF) {
1052             xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx,
1053                                          backlog_prio, group_level);
1054             regs[TM_PIPR] = backlog_prio;
1055         }
1056     }
1057     /* CPPR has changed, check if we need to raise a pending exception */
1058     xive_tctx_notify(tctx, ring_min, group_level);
1059 }
1060 
1061 void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
1062                           hwaddr offset, uint64_t value, unsigned size)
1063 {
1064     xive2_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
1065 }
1066 
1067 void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
1068                           hwaddr offset, uint64_t value, unsigned size)
1069 {
1070     xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
1071 }
1072 
1073 static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target)
1074 {
1075     uint8_t *regs = &tctx->regs[ring];
1076 
1077     regs[TM_T] = target;
1078 }
1079 
1080 void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
1081                             hwaddr offset, uint64_t value, unsigned size)
1082 {
1083     xive2_tctx_set_target(tctx, TM_QW3_HV_PHYS, value & 0xff);
1084 }
1085 
1086 /*
1087  * XIVE Router (aka. Virtualization Controller or IVRE)
1088  */
1089 
1090 int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
1091                          Xive2Eas *eas)
1092 {
1093     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
1094 
1095     return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
1096 }
1097 
1098 static
1099 int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
1100                         uint8_t *pq)
1101 {
1102     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
1103 
1104     return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
1105 }
1106 
1107 static
1108 int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
1109                         uint8_t *pq)
1110 {
1111     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
1112 
1113     return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
1114 }
1115 
1116 int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
1117                          Xive2End *end)
1118 {
1119     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
1120 
1121     return xrc->get_end(xrtr, end_blk, end_idx, end);
1122 }
1123 
1124 int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
1125                            Xive2End *end, uint8_t word_number)
1126 {
1127     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
1128 
1129     return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
1130 }
1131 
1132 int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
1133                          Xive2Nvp *nvp)
1134 {
1135     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
1136 
1137     return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp);
1138 }
1139 
1140 int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
1141                            Xive2Nvp *nvp, uint8_t word_number)
1142 {
1143     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
1144 
1145     return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
1146 }
1147 
1148 int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd,
1149                           uint8_t nvgc_blk, uint32_t nvgc_idx,
1150                           Xive2Nvgc *nvgc)
1151 {
1152     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
1153 
1154     return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
1155 }
1156 
1157 int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
1158                             uint8_t nvgc_blk, uint32_t nvgc_idx,
1159                             Xive2Nvgc *nvgc)
1160 {
1161     Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
1162 
1163     return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
1164 }
1165 
1166 static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2,
1167                                 uint32_t vp_mask)
1168 {
1169     return (cam1 & vp_mask) == (cam2 & vp_mask);
1170 }
1171 
1172 static uint8_t xive2_get_vp_block_mask(uint32_t nvt_blk, bool crowd)
1173 {
1174     uint8_t block_mask = 0b1111;
1175 
1176     /* 3 supported crowd sizes: 2, 4, 16 */
1177     if (crowd) {
1178         uint32_t size = xive_get_vpgroup_size(nvt_blk);
1179 
1180         if (size != 2 && size != 4 && size != 16) {
1181             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd size of %d",
1182                                            size);
1183             return block_mask;
1184         }
1185         block_mask &= ~(size - 1);
1186     }
1187     return block_mask;
1188 }
1189 
1190 static uint32_t xive2_get_vp_index_mask(uint32_t nvt_index, bool cam_ignore)
1191 {
1192     uint32_t index_mask = 0xFFFFFF; /* 24 bits */
1193 
1194     if (cam_ignore) {
1195         uint32_t size = xive_get_vpgroup_size(nvt_index);
1196 
1197         if (size < 2) {
1198             qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group size of %d",
1199                                            size);
1200             return index_mask;
1201         }
1202         index_mask &= ~(size - 1);
1203     }
1204     return index_mask;
1205 }
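
/*
 * Worked example: a VP-group notification for a group of 16 yields an
 * index_mask of 0xfffff0, so the CAM comparison below ignores the low
 * 4 bits of the NVP index. A crowd of 4 additionally clears the low 2
 * bits of the block mask.
 */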
1206 
1207 /*
1208  * The thread context register words are in big-endian format.
1209  */
1210 int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
1211                                uint8_t format,
1212                                uint8_t nvt_blk, uint32_t nvt_idx,
1213                                bool crowd, bool cam_ignore,
1214                                uint32_t logic_serv)
1215 {
1216     uint32_t cam =   xive2_nvp_cam_line(nvt_blk, nvt_idx);
1217     uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
1218     uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
1219     uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
1220     uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);
1221 
1222     uint32_t index_mask, vp_mask;
1223     uint8_t block_mask;
1224 
1225     if (format == 0) {
1226         /*
1227          * i=0: Specific NVT notification
1228          * i=1: VP-group notification (bits ignored at the end of the
1229          *      NVT identifier)
1230          */
1231         block_mask = xive2_get_vp_block_mask(nvt_blk, crowd);
1232         index_mask = xive2_get_vp_index_mask(nvt_idx, cam_ignore);
1233         vp_mask = xive2_nvp_cam_line(block_mask, index_mask);
1234 
1235         /* For VP-group notifications, threads with LGS=0 are excluded */
1236 
1237         /* PHYS ring */
1238         if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
1239             !(cam_ignore && tctx->regs[TM_QW3_HV_PHYS + TM_LGS] == 0) &&
1240             xive2_vp_match_mask(cam,
1241                                 xive2_tctx_hw_cam_line(xptr, tctx),
1242                                 vp_mask)) {
1243             return TM_QW3_HV_PHYS;
1244         }
1245 
1246         /* HV POOL ring */
1247         if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
1248             !(cam_ignore && tctx->regs[TM_QW2_HV_POOL + TM_LGS] == 0) &&
1249             xive2_vp_match_mask(cam,
1250                                 xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2),
1251                                 vp_mask)) {
1252             return TM_QW2_HV_POOL;
1253         }
1254 
1255         /* OS ring */
1256         if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
1257             !(cam_ignore && tctx->regs[TM_QW1_OS + TM_LGS] == 0) &&
1258             xive2_vp_match_mask(cam,
1259                                 xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2),
1260                                 vp_mask)) {
1261             return TM_QW1_OS;
1262         }
1263     } else {
1264         /* F=1 : User level Event-Based Branch (EBB) notification */
1265 
1266         /* FIXME: what if cam_ignore and LGS = 0 ? */
1267         /* USER ring */
1268         if  ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
1269              (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
1270              (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) &&
1271              (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) {
1272             return TM_QW0_USER;
1273         }
1274     }
1275     return -1;
1276 }
1277 
1278 bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority)
1279 {
1280     /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
1281     uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
1282     uint8_t *alt_regs = &tctx->regs[alt_ring];
1283 
1284     /*
1285      * xive2_presenter_tctx_match() above tells us whether there's a
1286      * match, but for a VP-group notification we still need to look
1287      * at the priority to know if the thread can take the interrupt
1288      * now or if it is precluded.
1289      */
1290     if (priority < alt_regs[TM_PIPR]) {
1291         return false;
1292     }
1293     return true;
1294 }
1295 
1296 void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority)
1297 {
1298     uint8_t *regs = &tctx->regs[ring];
1299 
1300     /*
1301      * Called by the router during a VP-group notification when the
1302      * thread matches but can't take the interrupt because it's
1303      * already running at a more favored priority. It then stores the
1304      * new interrupt priority in the LSMFB field.
1305      */
1306     regs[TM_LSMFB] = priority;
1307 }
1308 
1309 static void xive2_router_realize(DeviceState *dev, Error **errp)
1310 {
1311     Xive2Router *xrtr = XIVE2_ROUTER(dev);
1312 
1313     assert(xrtr->xfb);
1314 }
1315 
1316 /*
1317  * Notification using the END ESe/ESn bit (Event State Buffer for
1318  * escalation and notification). Provide further coalescing in the
1319  * Router.
1320  */
1321 static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
1322                                        uint32_t end_idx, Xive2End *end,
1323                                        uint32_t end_esmask)
1324 {
1325     uint8_t pq = xive_get_field32(end_esmask, end->w1);
1326     bool notify = xive_esb_trigger(&pq);
1327 
1328     if (pq != xive_get_field32(end_esmask, end->w1)) {
1329         end->w1 = xive_set_field32(end_esmask, end->w1, pq);
1330         xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
1331     }
1332 
1333     /* ESe/n[Q]=1 : end of notification */
1334     return notify;
1335 }
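
/*
 * ESB reminder: xive_esb_trigger() on a RESET (PQ=00) buffer moves it
 * to PENDING (PQ=10) and requests a notification; on PENDING or
 * QUEUED it leaves the buffer QUEUED (PQ=11) and coalesces the event;
 * OFF (PQ=01) swallows it entirely.
 */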
1336 
1337 /*
1338  * An END trigger can come from an event trigger (IPI or HW) or from
1339  * another chip. We don't model the PowerBus but the END trigger
1340  * message has the same parameters as the function below.
1341  */
1342 static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
1343                                     uint32_t end_idx, uint32_t end_data)
1344 {
1345     Xive2End end;
1346     uint8_t priority;
1347     uint8_t format;
1348     bool found, precluded;
1349     uint8_t nvx_blk;
1350     uint32_t nvx_idx;
1351 
1352     /* END cache lookup */
1353     if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
1354         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
1355                       end_idx);
1356         return;
1357     }
1358 
1359     if (!xive2_end_is_valid(&end)) {
1360         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
1361                       end_blk, end_idx);
1362         return;
1363     }
1364 
1365     if (xive2_end_is_crowd(&end) && !xive2_end_is_ignore(&end)) {
1366         qemu_log_mask(LOG_GUEST_ERROR,
1367                       "XIVE: invalid END, 'crowd' bit requires 'ignore' bit\n");
1368         return;
1369     }
1370 
1371     if (xive2_end_is_enqueue(&end)) {
1372         xive2_end_enqueue(&end, end_data);
1373         /* Enqueuing event data modifies the EQ toggle and index */
1374         xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
1375     }
1376 
1377     /*
1378      * When the END is silent, we skip the notification part.
1379      */
1380     if (xive2_end_is_silent_escalation(&end)) {
1381         goto do_escalation;
1382     }
1383 
1384     /*
1385      * The W7 format depends on the F bit in W6. It defines the type
1386      * of the notification:
1387      *
1388      *   F=0 : single or multiple NVP notification
1389      *   F=1 : User level Event-Based Branch (EBB) notification, no
1390      *         priority
1391      */
1392     format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
1393     priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);
1394 
1395     /* The END is masked */
1396     if (format == 0 && priority == 0xff) {
1397         return;
1398     }
1399 
1400     /*
1401      * Check the END ESn (Event State Buffer for notification) for
1402      * even further coalescing in the Router
1403      */
1404     if (!xive2_end_is_notify(&end)) {
1405         /* ESn[Q]=1 : end of notification */
1406         if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
1407                                        &end, END2_W1_ESn)) {
1408             return;
1409         }
1410     }
1411 
1412     /*
1413      * Follows IVPE notification
1414      */
1415     nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
1416     nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);
1417 
1418     found = xive_presenter_notify(xrtr->xfb, format, nvx_blk, nvx_idx,
1419                           xive2_end_is_crowd(&end), xive2_end_is_ignore(&end),
1420                           priority,
1421                           xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7),
1422                           &precluded);
1423 
1424     /* TODO: Auto EOI. */
1425 
1426     if (found) {
1427         return;
1428     }
1429 
1430     /*
1431      * If no matching NVP is dispatched on a HW thread :
1432      * - specific VP: update the NVP structure if backlog is activated
1433      * - VP-group: update the backlog counter for that priority in the NVG
1434      */
1435     if (xive2_end_is_backlog(&end)) {
1436 
1437         if (format == 1) {
1438             qemu_log_mask(LOG_GUEST_ERROR,
1439                           "XIVE: END %x/%x invalid config: F1 & backlog\n",
1440                           end_blk, end_idx);
1441             return;
1442         }
1443 
1444         if (!xive2_end_is_ignore(&end)) {
1445             uint8_t ipb;
1446             Xive2Nvp nvp;
1447 
1448             /* NVP cache lookup */
1449             if (xive2_router_get_nvp(xrtr, nvx_blk, nvx_idx, &nvp)) {
1450                 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
1451                               nvx_blk, nvx_idx);
1452                 return;
1453             }
1454 
1455             if (!xive2_nvp_is_valid(&nvp)) {
1456                 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
1457                               nvx_blk, nvx_idx);
1458                 return;
1459             }
1460 
1461             /*
1462              * Record the IPB in the associated NVP structure for later
1463              * use. The presenter will resend the interrupt when the vCPU
1464              * is dispatched again on a HW thread.
1465              */
1466             ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
1467                 xive_priority_to_ipb(priority);
1468             nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
1469             xive2_router_write_nvp(xrtr, nvx_blk, nvx_idx, &nvp, 2);
1470         } else {
1471             Xive2Nvgc nvgc;
1472             uint32_t backlog;
1473             bool crowd;
1474 
1475             crowd = xive2_end_is_crowd(&end);
1476 
1477             /*
1478              * For groups and crowds, the per-priority backlog
1479              * counters are stored in the NVG/NVC structures
1480              */
1481             if (xive2_router_get_nvgc(xrtr, crowd,
1482                                       nvx_blk, nvx_idx, &nvgc)) {
1483                 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no %s %x/%x\n",
1484                               crowd ? "NVC" : "NVG", nvx_blk, nvx_idx);
1485                 return;
1486             }
1487 
1488             if (!xive2_nvgc_is_valid(&nvgc)) {
1489                 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n",
1490                               nvx_blk, nvx_idx);
1491                 return;
1492             }
1493 
1494             /*
1495              * Increment the backlog counter for that priority.
1496              * We only call broadcast the first time the counter is
1497              * incremented. broadcast will set the LSMFB field of the TIMA of
1498              * relevant threads so that they know an interrupt is pending.
1499              */
1500             backlog = xive2_nvgc_get_backlog(&nvgc, priority) + 1;
1501             xive2_nvgc_set_backlog(&nvgc, priority, backlog);
1502             xive2_router_write_nvgc(xrtr, crowd, nvx_blk, nvx_idx, &nvgc);
1503 
1504             if (backlog == 1) {
1505                 XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb);
1506                 xfc->broadcast(xrtr->xfb, nvx_blk, nvx_idx,
1507                                xive2_end_is_crowd(&end),
1508                                xive2_end_is_ignore(&end),
1509                                priority);
1510 
1511                 if (!xive2_end_is_precluded_escalation(&end)) {
1512                     /*
1513                      * The interrupt will be picked up when the
1514                      * matching thread lowers its priority level
1515                      */
1516                     return;
1517                 }
1518             }
1519         }
1520     }
1521 
1522 do_escalation:
1523     /*
1524      * If activated, escalate notification using the ESe PQ bits and
1525      * the EAS in w4-5
1526      */
1527     if (!xive2_end_is_escalate(&end)) {
1528         return;
1529     }
1530 
1531     /*
1532      * Check the END ESe (Event State Buffer for escalation) for even
1533      * further coalescing in the Router
1534      */
1535     if (!xive2_end_is_uncond_escalation(&end)) {
1536         /* ESe[Q]=1 : end of escalation notification */
1537         if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
1538                                        &end, END2_W1_ESe)) {
1539             return;
1540         }
1541     }
1542 
1543     /*
1544      * The END trigger becomes an Escalation trigger
1545      */
1546     xive2_router_end_notify(xrtr,
1547                            xive_get_field32(END2_W4_END_BLOCK,     end.w4),
1548                            xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
1549                            xive_get_field32(END2_W5_ESC_END_DATA,  end.w5));
1550 }
1551 
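/*
 * Process an event trigger for interrupt line 'lisn': resolve the EAS,
 * run the source PQ state machine when the source has not already done
 * it (pq_checked == false) and, if the event is valid and unmasked,
 * turn it into an END trigger.
 *
 * Sketch of a caller, assuming the source already handled its PQ bits:
 *
 *   xive2_router_notify(xn, lisn, true);
 */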
1552 void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
1553 {
1554     Xive2Router *xrtr = XIVE2_ROUTER(xn);
1555     uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
1556     uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
1557     Xive2Eas eas;
1558 
1559     /* EAS cache lookup */
1560     if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
1561         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
1562         return;
1563     }
1564 
1565     if (!pq_checked) {
1566         bool notify;
1567         uint8_t pq;
1568 
1569         /* PQ cache lookup */
1570         if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
1571             /* Set FIR */
1572             g_assert_not_reached();
1573         }
1574 
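        /* the PQ state machine decides whether the trigger is forwarded */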
1575         notify = xive_esb_trigger(&pq);
1576 
1577         if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
1578             /* Set FIR */
1579             g_assert_not_reached();
1580         }
1581 
1582         if (!notify) {
1583             return;
1584         }
1585     }
1586 
1587     if (!xive2_eas_is_valid(&eas)) {
1588         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn);
1589         return;
1590     }
1591 
1592     if (xive2_eas_is_masked(&eas)) {
1593         /* Notification completed */
1594         return;
1595     }
1596 
1597     /*
1598      * The event trigger becomes an END trigger
1599      */
1600     xive2_router_end_notify(xrtr,
1601                              xive_get_field64(EAS2_END_BLOCK, eas.w),
1602                              xive_get_field64(EAS2_END_INDEX, eas.w),
1603                              xive_get_field64(EAS2_END_DATA,  eas.w));
1604 }
1605 
1606 static const Property xive2_router_properties[] = {
1607     DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
1608                      TYPE_XIVE_FABRIC, XiveFabric *),
1609 };
1610 
1611 static void xive2_router_class_init(ObjectClass *klass, const void *data)
1612 {
1613     DeviceClass *dc = DEVICE_CLASS(klass);
1614     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
1615 
1616     dc->desc    = "XIVE2 Router Engine";
1617     device_class_set_props(dc, xive2_router_properties);
1618     /* Parent is SysBusDeviceClass. No need to call its realize hook */
1619     dc->realize = xive2_router_realize;
1620     xnc->notify = xive2_router_notify;
1621 }
1622 
1623 static const TypeInfo xive2_router_info = {
1624     .name          = TYPE_XIVE2_ROUTER,
1625     .parent        = TYPE_SYS_BUS_DEVICE,
1626     .abstract      = true,
1627     .instance_size = sizeof(Xive2Router),
1628     .class_size    = sizeof(Xive2RouterClass),
1629     .class_init    = xive2_router_class_init,
1630     .interfaces    = (const InterfaceInfo[]) {
1631         { TYPE_XIVE_NOTIFIER },
1632         { TYPE_XIVE_PRESENTER },
1633         { }
1634     }
1635 };
1636 
1637 static inline bool addr_is_even(hwaddr addr, uint32_t shift)
1638 {
1639     return !((addr >> shift) & 1);
1640 }
1641 
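/*
 * ESB MMIO accesses on an END pair: the even page operates on the ESn
 * (notification) state machine and the odd page on the ESe (escalation)
 * state machine, as selected with addr_is_even() below.
 */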
1642 static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size)
1643 {
1644     Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
1645     uint32_t offset = addr & 0xFFF;
1646     uint8_t end_blk;
1647     uint32_t end_idx;
1648     Xive2End end;
1649     uint32_t end_esmask;
1650     uint8_t pq;
1651     uint64_t ret;
1652 
1653     /*
1654      * The block id should be deduced from the load address on the END
1655      * ESB MMIO but our model only supports a single block per XIVE chip.
1656      */
1657     end_blk = xive2_router_get_block_id(xsrc->xrtr);
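    /* each END owns an even/odd pair of ESB pages, hence the '+ 1' */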
1658     end_idx = addr >> (xsrc->esb_shift + 1);
1659 
1660     if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
1661         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
1662                       end_idx);
1663         return -1;
1664     }
1665 
1666     if (!xive2_end_is_valid(&end)) {
1667         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
1668                       end_blk, end_idx);
1669         return -1;
1670     }
1671 
1672     end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
1673         END2_W1_ESe;
1674     pq = xive_get_field32(end_esmask, end.w1);
1675 
1676     switch (offset) {
1677     case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
1678         ret = xive_esb_eoi(&pq);
1679 
1680         /* TODO: forward the source event notification for routing */
1681         break;
1682 
1683     case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
1684         ret = pq;
1685         break;
1686 
1687     case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
1688     case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
1689     case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
1690     case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
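        /* the new PQ state is encoded in bits 8-9 of the page offset */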
1691         ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
1692         break;
1693     default:
1694         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr 0x%x\n",
1695                       offset);
1696         return -1;
1697     }
1698 
1699     if (pq != xive_get_field32(end_esmask, end.w1)) {
1700         end.w1 = xive_set_field32(end_esmask, end.w1, pq);
1701         xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
1702     }
1703 
1704     return ret;
1705 }
1706 
1707 static void xive2_end_source_write(void *opaque, hwaddr addr,
1708                                    uint64_t value, unsigned size)
1709 {
1710     Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
1711     uint32_t offset = addr & 0xFFF;
1712     uint8_t end_blk;
1713     uint32_t end_idx;
1714     Xive2End end;
1715     uint32_t end_esmask;
1716     uint8_t pq;
1717     bool notify = false;
1718 
1719     /*
1720      * The block id should be deduced from the store address on the END
1721      * ESB MMIO but our model only supports a single block per XIVE chip.
1722      */
1723     end_blk = xive2_router_get_block_id(xsrc->xrtr);
1724     end_idx = addr >> (xsrc->esb_shift + 1);
1725 
1726     if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
1727         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
1728                       end_idx);
1729         return;
1730     }
1731 
1732     if (!xive2_end_is_valid(&end)) {
1733         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
1734                       end_blk, end_idx);
1735         return;
1736     }
1737 
1738     end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
1739         END2_W1_ESe;
1740     pq = xive_get_field32(end_esmask, end.w1);
1741 
1742     switch (offset) {
1743     case 0 ... 0x3FF:
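        /* a store to the first 0x400 bytes of the page is an END trigger */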
1744         notify = xive_esb_trigger(&pq);
1745         break;
1746 
1747     case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
1748         /* TODO: can we check StoreEOI availability from the router? */
1749         notify = xive_esb_eoi(&pq);
1750         break;
1751 
1752     case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
1753         if (end_esmask == END2_W1_ESe) {
1754             qemu_log_mask(LOG_GUEST_ERROR,
1755                           "XIVE: END %x/%x can not EQ inject on ESe\n",
1756                            end_blk, end_idx);
1757             return;
1758         }
1759         notify = true;
1760         break;
1761 
1762     default:
1763         qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr 0x%x\n",
1764                       offset);
1765         return;
1766     }
1767 
1768     if (pq != xive_get_field32(end_esmask, end.w1)) {
1769         end.w1 = xive_set_field32(end_esmask, end.w1, pq);
1770         xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
1771     }
1772 
1773     /* TODO: Forward the source event notification for routing */
1774     if (notify) {
1775         ;
1776     }
1777 }
1778 
1779 static const MemoryRegionOps xive2_end_source_ops = {
1780     .read = xive2_end_source_read,
1781     .write = xive2_end_source_write,
1782     .endianness = DEVICE_BIG_ENDIAN,
1783     .valid = {
1784         .min_access_size = 1,
1785         .max_access_size = 8,
1786     },
1787     .impl = {
1788         .min_access_size = 1,
1789         .max_access_size = 8,
1790     },
1791 };
1792 
1793 static void xive2_end_source_realize(DeviceState *dev, Error **errp)
1794 {
1795     Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);
1796 
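    /* the "xive" link property must point to a router before realize */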
1797     assert(xsrc->xrtr);
1798 
1799     if (!xsrc->nr_ends) {
1800         error_setg(errp, "Number of interrupt needs to be greater than 0");
1801         return;
1802     }
1803 
1804     if (xsrc->esb_shift != XIVE_ESB_4K &&
1805         xsrc->esb_shift != XIVE_ESB_64K) {
1806         error_setg(errp, "Invalid ESB shift setting");
1807         return;
1808     }
1809 
1810     /*
1811      * Each END is assigned an even/odd pair of MMIO pages, the even page
1812      * manages the ESn field while the odd page manages the ESe field.
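     * With 64K ESB pages (XIVE_ESB_64K), for example, each END then
     * consumes 128K of ESB MMIO space.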
1813      */
1814     memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
1815                           &xive2_end_source_ops, xsrc, "xive.end",
1816                           (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
1817 }
1818 
1819 static const Property xive2_end_source_properties[] = {
1820     DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
1821     DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
1822     DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
1823                      Xive2Router *),
1824 };
1825 
1826 static void xive2_end_source_class_init(ObjectClass *klass, const void *data)
1827 {
1828     DeviceClass *dc = DEVICE_CLASS(klass);
1829 
1830     dc->desc    = "XIVE END Source";
1831     device_class_set_props(dc, xive2_end_source_properties);
1832     dc->realize = xive2_end_source_realize;
1833     dc->user_creatable = false;
1834 }
1835 
1836 static const TypeInfo xive2_end_source_info = {
1837     .name          = TYPE_XIVE2_END_SOURCE,
1838     .parent        = TYPE_DEVICE,
1839     .instance_size = sizeof(Xive2EndSource),
1840     .class_init    = xive2_end_source_class_init,
1841 };
1842 
1843 static void xive2_register_types(void)
1844 {
1845     type_register_static(&xive2_router_info);
1846     type_register_static(&xive2_end_source_info);
1847 }
1848 
1849 type_init(xive2_register_types)
1850