xref: /openbmc/qemu/hw/intc/xive2.c (revision 8d373176181fbc11f8d8eae2b4532b867f083ea6)
/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2024, IBM Corporation.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "system/cpus.h"
#include "system/dma.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/xive2_regs.h"
#include "trace.h"

uint32_t xive2_router_get_config(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_config(xrtr);
}

static int xive2_router_get_block_id(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_block_id(xrtr);
}

static uint64_t xive2_nvp_reporting_addr(Xive2Nvp *nvp)
{
    uint64_t cache_addr;

    cache_addr = xive_get_field32(NVP2_W6_REPORTING_LINE, nvp->w6) << 24 |
        xive_get_field32(NVP2_W7_REPORTING_LINE, nvp->w7);
    cache_addr <<= 8; /* aligned on a cache line pair */
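    /*
     * e.g. (illustrative) REPORTING_LINE fields of 0x12 (w6) and
     * 0x345678 (w7) give (0x12 << 24 | 0x345678) << 8 = 0x1234567800
     */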
    return cache_addr;
}

static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
{
    uint32_t val = 0;
    uint8_t *ptr, i;

    if (priority > 7) {
        return 0;
    }

    /*
     * The per-priority backlog counters are 24-bit and the structure
     * is stored in big endian. The NVGC structure is 32 bytes long, so
     * the 24 bytes starting at w2 fit 8 priorities * 24 bits per
     * priority.
     */
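    /*
     * e.g. (illustrative) the counter for priority 2 lives in bytes
     * 6..8 after w2, read MSB first: val = p[6] << 16 | p[7] << 8 | p[8]
     */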
    ptr = (uint8_t *)&nvgc->w2 + priority * 3;
    for (i = 0; i < 3; i++, ptr++) {
        val = (val << 8) + *ptr;
    }
    return val;
}

static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority,
                                   uint32_t val)
{
    uint8_t *ptr, i;
    uint32_t shift;

    if (priority > 7) {
        return;
    }

    if (val > 0xFFFFFF) {
        val = 0xFFFFFF;
    }
    /*
     * The per-priority backlog counters are 24-bit and the structure
     * is stored in big endian
     */
    ptr = (uint8_t *)&nvgc->w2 + priority * 3;
    for (i = 0; i < 3; i++, ptr++) {
        shift = 8 * (2 - i);
        *ptr = (val >> shift) & 0xFF;
    }
}

uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr,
                                         bool crowd,
                                         uint8_t blk, uint32_t idx,
                                         uint16_t offset, uint16_t val)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset);
    uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset);
    Xive2Nvgc nvgc;
    uint32_t count, old_count;

    if (xive2_router_get_nvgc(xrtr, crowd, blk, idx, &nvgc)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No %s %x/%x\n",
                      crowd ? "NVC" : "NVG", blk, idx);
        return -1;
    }
    if (!xive2_nvgc_is_valid(&nvgc)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", blk, idx);
        return -1;
    }

    old_count = xive2_nvgc_get_backlog(&nvgc, priority);
    count = old_count;
    /*
     * op:
     * 0b00 => increment
     * 0b01 => decrement
     * 0b1- => read
     */
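    /*
     * (Illustrative: op=0b01 with val=3 decrements the 24-bit counter
     * by 3, clamping at 0; the pre-op value is always returned.)
     */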
    if (op == 0b00 || op == 0b01) {
        if (op == 0b00) {
            count += val;
        } else {
            if (count > val) {
                count -= val;
            } else {
                count = 0;
            }
        }
        xive2_nvgc_set_backlog(&nvgc, priority, count);
        xive2_router_write_nvgc(xrtr, crowd, blk, idx, &nvgc);
    }
    trace_xive_nvgc_backlog_op(crowd, blk, idx, op, priority, old_count);
    return old_count;
}

uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr,
                                        uint8_t blk, uint32_t idx,
                                        uint16_t offset)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset);
    uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset);
    Xive2Nvp nvp;
    uint8_t ipb, old_ipb, rc;

    if (xive2_router_get_nvp(xrtr, blk, idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", blk, idx);
        return -1;
    }
    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVP %x/%x\n", blk, idx);
        return -1;
    }

    old_ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
    ipb = old_ipb;
    /*
     * op:
     * 0b00 => set priority bit
     * 0b01 => reset priority bit
     * 0b1- => read
     */
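    /*
     * (Illustrative: op=0b00 with priority 5 sets bit 0x80 >> 5 = 0x04
     * in the IPB; the pre-op state of that bit is returned.)
     */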
    if (op == 0b00 || op == 0b01) {
        if (op == 0b00) {
            ipb |= xive_priority_to_ipb(priority);
        } else {
            ipb &= ~xive_priority_to_ipb(priority);
        }
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
        xive2_router_write_nvp(xrtr, blk, idx, &nvp, 2);
    }
    rc = !!(old_ipb & xive_priority_to_ipb(priority));
    trace_xive_nvp_backlog_op(blk, idx, op, priority, rc);
    return rc;
}

void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf)
{
    if (!xive2_eas_is_valid(eas)) {
        return;
    }

    g_string_append_printf(buf, "  %08x %s end:%02x/%04x data:%08x\n",
                           lisn, xive2_eas_is_masked(eas) ? "M" : " ",
                           (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

#define XIVE2_QSIZE_CHUNK_CL    128
#define XIVE2_QSIZE_CHUNK_4k   4096
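/*
 * Illustrative sizing: qsize=0 maps to a 4k-byte queue, i.e. 1024
 * 4-byte entries; with the cache-line flag set, qsize=0 maps to
 * 128 bytes, i.e. 32 entries.
 */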
/* Calculate max number of queue entries for an END */
static uint32_t xive2_end_get_qentries(Xive2End *end)
{
    uint32_t w3 = end->w3;
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, w3);
    if (xive_get_field32(END2_W3_CL, w3)) {
        g_assert(qsize <= 4);
        return (XIVE2_QSIZE_CHUNK_CL << qsize) / sizeof(uint32_t);
    } else {
        g_assert(qsize <= 12);
        return (XIVE2_QSIZE_CHUNK_4k << qsize) / sizeof(uint32_t);
    }
}

void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, GString *buf)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qentries = xive2_end_get_qentries(end);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    g_string_append_printf(buf, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        g_string_append_printf(buf, "%s%08x ", i == width - 1 ? "^" : "",
                               be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    g_string_append_printf(buf, "]");
}

void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
    uint32_t qentries = xive2_end_get_qentries(end);

    uint32_t nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
    uint32_t nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
    uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive2_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESn, end->w1);

    g_string_append_printf(buf,
                           "  %08x %c%c %c%c%c%c%c%c%c%c%c%c%c %c%c "
                           "prio:%d nvp:%02x/%04x",
                           end_idx,
                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                           xive2_end_is_valid(end)    ? 'v' : '-',
                           xive2_end_is_enqueue(end)  ? 'q' : '-',
                           xive2_end_is_notify(end)   ? 'n' : '-',
                           xive2_end_is_backlog(end)  ? 'b' : '-',
                           xive2_end_is_precluded_escalation(end) ? 'p' : '-',
                           xive2_end_is_escalate(end) ? 'e' : '-',
                           xive2_end_is_escalate_end(end) ? 'N' : '-',
                           xive2_end_is_uncond_escalation(end)   ? 'u' : '-',
                           xive2_end_is_silent_escalation(end)   ? 's' : '-',
                           xive2_end_is_firmware1(end)   ? 'f' : '-',
                           xive2_end_is_firmware2(end)   ? 'F' : '-',
                           xive2_end_is_ignore(end) ? 'i' : '-',
                           xive2_end_is_crowd(end)  ? 'c' : '-',
                           priority, nvx_blk, nvx_idx);

    if (qaddr_base) {
        g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                               qaddr_base, qindex, qentries, qgen);
        xive2_end_queue_pic_print_info(end, 6, buf);
    }
    g_string_append_c(buf, '\n');
}

void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
                                  GString *buf)
{
    Xive2Eas *eas = (Xive2Eas *) &end->w4;
    uint8_t pq;

    if (!xive2_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESe, end->w1);

    g_string_append_printf(buf, "  %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                           end_idx,
                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                           xive2_eas_is_valid(eas) ? 'v' : ' ',
                           xive2_eas_is_masked(eas) ? 'M' : ' ',
                           (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
    uint64_t cache_line = xive2_nvp_reporting_addr(nvp);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    g_string_append_printf(buf, "  %08x end:%02x/%04x IPB:%02x PGoFirst:%02x",
                           nvp_idx, eq_blk, eq_idx,
                           xive_get_field32(NVP2_W2_IPB, nvp->w2),
                           xive_get_field32(NVP2_W0_PGOFIRST, nvp->w0));
    if (cache_line) {
        g_string_append_printf(buf, "  reporting CL:%016"PRIx64, cache_line);
    }

    /*
     * When the NVP is HW controlled, more fields are updated
     */
    if (xive2_nvp_is_hw(nvp)) {
        g_string_append_printf(buf, " CPPR:%02x",
                               xive_get_field32(NVP2_W2_CPPR, nvp->w2));
        if (xive2_nvp_is_co(nvp)) {
            g_string_append_printf(buf, " CO:%04x",
                                   xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
        }
    }
    g_string_append_c(buf, '\n');
}

void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf)
{
    uint8_t i;

    if (!xive2_nvgc_is_valid(nvgc)) {
        return;
    }

    g_string_append_printf(buf, "  %08x PGoNext:%02x bklog: ", nvgc_idx,
                           xive_get_field32(NVGC2_W0_PGONEXT, nvgc->w0));
    for (i = 0; i <= XIVE_PRIORITY_MAX; i++) {
        g_string_append_printf(buf, "[%d]=0x%x ",
                               i, xive2_nvgc_get_backlog(nvgc, i));
    }
    g_string_append_printf(buf, "\n");
}

static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
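    /* e.g. (illustrative) qgen=1, data=0x123 gives the BE word 0x80000123 */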
    uint32_t qentries = xive2_end_get_qentries(end);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
                         MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);

        /* Set gen flipped to 1, it gets reset on a cache watch operation */
        end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, 1);
    }
    end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}

static void xive2_pgofnext(uint8_t *nvgc_blk, uint32_t *nvgc_idx,
                           uint8_t next_level)
{
    uint32_t mask, next_idx;
    uint8_t next_blk;

    /*
     * Adjust the block and index of a VP for the next group/crowd
     * size (PGofFirst/PGofNext field in the NVP and NVGC structures).
     *
     * The 6-bit group level is split into a 2-bit crowd level and a
     * 4-bit group level. Encoding is similar. However, we don't
     * support a crowd size of 8, so a crowd level of 0b11 is bumped
     * to a crowd size of 16.
     */
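    /*
     * e.g. (illustrative) with a group level of 3, the low 3 bits of
     * the index are replaced with 0b011: idx 0b10101 becomes 0b10011.
     */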
    next_blk = NVx_CROWD_LVL(next_level);
    if (next_blk == 3) {
        next_blk = 4;
    }
    mask = (1 << next_blk) - 1;
    *nvgc_blk &= ~mask;
    *nvgc_blk |= mask >> 1;

    next_idx = NVx_GROUP_LVL(next_level);
    mask = (1 << next_idx) - 1;
    *nvgc_idx &= ~mask;
    *nvgc_idx |= mask >> 1;
}

/*
 * Scan the group chain and return the highest priority and group
 * level of pending group interrupts.
 */
static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr,
                                            uint8_t nvx_blk, uint32_t nvx_idx,
                                            uint8_t first_group,
                                            uint8_t *out_level)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t nvgc_idx;
    uint32_t current_level, count;
    uint8_t nvgc_blk, prio;
    Xive2Nvgc nvgc;

    for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) {
        current_level = first_group & 0x3F;
        nvgc_blk = nvx_blk;
        nvgc_idx = nvx_idx;

        while (current_level) {
            xive2_pgofnext(&nvgc_blk, &nvgc_idx, current_level);

            if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(current_level),
                                      nvgc_blk, nvgc_idx, &nvgc)) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n",
                              nvgc_blk, nvgc_idx);
                return 0xFF;
            }
            if (!xive2_nvgc_is_valid(&nvgc)) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n",
                              nvgc_blk, nvgc_idx);
                return 0xFF;
            }

            count = xive2_nvgc_get_backlog(&nvgc, prio);
            if (count) {
                *out_level = current_level;
                return prio;
            }
            current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0x3F;
        }
    }
    return 0xFF;
}

static void xive2_presenter_backlog_decr(XivePresenter *xptr,
                                         uint8_t nvx_blk, uint32_t nvx_idx,
                                         uint8_t group_prio,
                                         uint8_t group_level)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t nvgc_idx, count;
    uint8_t nvgc_blk;
    Xive2Nvgc nvgc;

    nvgc_blk = nvx_blk;
    nvgc_idx = nvx_idx;
    xive2_pgofnext(&nvgc_blk, &nvgc_idx, group_level);

    if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(group_level),
                              nvgc_blk, nvgc_idx, &nvgc)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n",
                      nvgc_blk, nvgc_idx);
        return;
    }
    if (!xive2_nvgc_is_valid(&nvgc)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n",
                      nvgc_blk, nvgc_idx);
        return;
    }
    count = xive2_nvgc_get_backlog(&nvgc, group_prio);
    if (!count) {
        return;
    }
    xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1);
    xive2_router_write_nvgc(xrtr, NVx_CROWD_LVL(group_level),
                            nvgc_blk, nvgc_idx, &nvgc);
}

/*
 * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
 *
 * TIMA Gen2 VP "save & restore" (S&R) indicated by H bit next to V bit
 *
 *   - if a context is enabled with the H bit set, the VP context
 *     information is retrieved from the NVP structure ("check out")
 *     and stored back on a context pull ("check in"); the SW receives
 *     the same context pull information as on P9
 *
 *   - the H bit cannot be changed while the V bit is set, i.e. a
 *     context cannot be set up in the TIMA and then be "pushed" into
 *     the NVP by changing the H bit while the context is enabled
 */

static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                uint8_t nvp_blk, uint32_t nvp_idx,
                                uint8_t ring)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    Xive2Nvp nvp;
    uint8_t *regs = &tctx->regs[ring];

    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_hw(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_co(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checkout\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) &&
        xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: NVP %x/%x invalid checkout Thread %x\n",
                      nvp_blk, nvp_idx, pir);
        return;
    }

    nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
    nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
    if (nvp.w0 & NVP2_W0_L) {
        /*
         * Typically not used. If LSMFB is restored with 0, it will
         * force a backlog rescan
         */
        nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
    }
    if (nvp.w0 & NVP2_W0_G) {
        nvp.w2 = xive_set_field32(NVP2_W2_LGS, nvp.w2, regs[TM_LGS]);
    }
    if (nvp.w0 & NVP2_W0_T) {
        nvp.w2 = xive_set_field32(NVP2_W2_T, nvp.w2, regs[TM_T]);
    }
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

    nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
    /* NVP2_W1_CO_THRID_VALID only set once */
    nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
}

static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk,
                             uint32_t *nvp_idx, bool *valid, bool *hw)
{
    *nvp_blk = xive2_nvp_blk(cam);
    *nvp_idx = xive2_nvp_idx(cam);
    *valid = !!(cam & TM2_W2_VALID);
    *hw = !!(cam & TM2_W2_HW);
}

/*
 * Encode the HW CAM line with 7bit or 8bit thread id. The thread id
 * width and block id width are configurable at the IC level.
 *
 *    chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
 *    chipid << 24 | 0000 0000 0000 0001 threadid   (8Bit)
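 *
 *    e.g. (illustrative) PIR 0x23 with 7-bit thread ids yields a
 *    thread id field of 1 << 7 | (0x23 & 0x7f) = 0xa3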
 */
static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t blk = xive2_router_get_block_id(xrtr);
    uint8_t tid_shift =
        xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
    uint8_t tid_mask = (1 << tid_shift) - 1;

    return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
}

static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                  hwaddr offset, unsigned size, uint8_t ring)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t target_ringw2 = xive_tctx_word2(&tctx->regs[ring]);
    uint32_t cam = be32_to_cpu(target_ringw2);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    uint8_t cur_ring;
    bool valid;
    bool do_save;

    xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save);

    if (!valid) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
                      nvp_blk, nvp_idx);
    }

    /* Invalidate CAM line of requested ring and all lower rings */
    for (cur_ring = TM_QW0_USER; cur_ring <= ring;
         cur_ring += XIVE_TM_RING_SIZE) {
        uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]);
        uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0);
        memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4);
    }

    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
        xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring);
    }

    /*
     * Lower external interrupt line of requested ring and below except for
     * USER, which doesn't exist.
     */
    for (cur_ring = TM_QW1_OS; cur_ring <= ring;
         cur_ring += XIVE_TM_RING_SIZE) {
        xive_tctx_reset_signal(tctx, cur_ring);
    }
    return target_ringw2;
}

uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset, unsigned size)
{
    return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS);
}

#define REPORT_LINE_GEN1_SIZE       16

static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data,
                                      uint8_t size)
{
    uint8_t *regs = tctx->regs;

    g_assert(size == REPORT_LINE_GEN1_SIZE);
    memset(data, 0, size);
    /*
     * See xive architecture for description of what is saved. It is
     * hand-picked information to fit in 16 bytes.
     */
    data[0x0] = regs[TM_QW3_HV_PHYS + TM_NSR];
    data[0x1] = regs[TM_QW3_HV_PHYS + TM_CPPR];
    data[0x2] = regs[TM_QW3_HV_PHYS + TM_IPB];
    data[0x3] = regs[TM_QW2_HV_POOL + TM_IPB];
    data[0x4] = regs[TM_QW1_OS + TM_ACK_CNT];
    data[0x5] = regs[TM_QW3_HV_PHYS + TM_LGS];
    data[0x6] = 0xFF;
    data[0x7] = regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x80;
    data[0x7] |= (regs[TM_QW2_HV_POOL + TM_WORD2] & 0x80) >> 1;
    data[0x7] |= (regs[TM_QW1_OS + TM_WORD2] & 0x80) >> 2;
    data[0x7] |= (regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x3);
    data[0x8] = regs[TM_QW1_OS + TM_NSR];
    data[0x9] = regs[TM_QW1_OS + TM_CPPR];
    data[0xA] = regs[TM_QW1_OS + TM_IPB];
    data[0xB] = regs[TM_QW1_OS + TM_LGS];
    if (regs[TM_QW0_USER + TM_WORD2] & 0x80) {
        /*
         * Logical server extension, except VU bit replaced by EB bit
         * from NSR
         */
        data[0xC] = regs[TM_QW0_USER + TM_WORD2];
        data[0xC] &= ~0x80;
        data[0xC] |= regs[TM_QW0_USER + TM_NSR] & 0x80;
        data[0xD] = regs[TM_QW0_USER + TM_WORD2 + 1];
        data[0xE] = regs[TM_QW0_USER + TM_WORD2 + 2];
        data[0xF] = regs[TM_QW0_USER + TM_WORD2 + 3];
    }
}

static void xive2_tm_pull_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                                 hwaddr offset, uint64_t value,
                                 unsigned size, uint8_t ring)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t hw_cam, nvp_idx, xive2_cfg, reserved;
    uint8_t nvp_blk;
    Xive2Nvp nvp;
    uint64_t phys_addr;
    MemTxResult result;

    hw_cam = xive2_tctx_hw_cam_line(xptr, tctx);
    nvp_blk = xive2_nvp_blk(hw_cam);
    nvp_idx = xive2_nvp_idx(hw_cam);

    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    xive2_cfg = xive2_router_get_config(xrtr);

    phys_addr = xive2_nvp_reporting_addr(&nvp) + 0x80; /* odd line */
    if (xive2_cfg & XIVE2_GEN1_TIMA_OS) {
        uint8_t pull_ctxt[REPORT_LINE_GEN1_SIZE];

        xive2_tm_report_line_gen1(tctx, pull_ctxt, REPORT_LINE_GEN1_SIZE);
        result = dma_memory_write(&address_space_memory, phys_addr,
                                  pull_ctxt, REPORT_LINE_GEN1_SIZE,
                                  MEMTXATTRS_UNSPECIFIED);
        assert(result == MEMTX_OK);
    } else {
        result = dma_memory_write(&address_space_memory, phys_addr,
                                  &tctx->regs, sizeof(tctx->regs),
                                  MEMTXATTRS_UNSPECIFIED);
        assert(result == MEMTX_OK);
        reserved = 0xFFFFFFFF;
        result = dma_memory_write(&address_space_memory, phys_addr + 12,
                                  &reserved, sizeof(reserved),
                                  MEMTXATTRS_UNSPECIFIED);
        assert(result == MEMTX_OK);
    }

    /* the rest is similar to pull context to registers */
    xive2_tm_pull_ctx(xptr, tctx, offset, size, ring);
}

void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                             hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW1_OS);
}

void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                               hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS);
}

static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                         uint8_t nvp_blk, uint32_t nvp_idx,
                                         Xive2Nvp *nvp)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t cppr;

    if (!xive2_nvp_is_hw(nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return 0;
    }

    cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2);
    nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);

    tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
    tctx->regs[TM_QW1_OS + TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2);
    tctx->regs[TM_QW1_OS + TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2);
    tctx->regs[TM_QW1_OS + TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2);

    nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir);

    /*
     * Checkout privilege: 0:OS, 1:Pool, 2:Hard
     *
     * TODO: we only support OS push/pull
     */
    nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0);

    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1);

    /* return restored CPPR to generate a CPU exception if needed */
    return cppr;
}

static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx,
                                   bool do_restore)
{
    XivePresenter *xptr = XIVE_PRESENTER(xrtr);
    uint8_t ipb;
    uint8_t backlog_level;
    uint8_t group_level;
    uint8_t first_group;
    uint8_t backlog_prio;
    uint8_t group_prio;
    uint8_t *regs = &tctx->regs[TM_QW1_OS];
    Xive2Nvp nvp;

    /*
     * Grab the associated thread interrupt context registers in the
     * associated NVP
     */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    /* Automatically restore thread context registers */
    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE &&
        do_restore) {
        xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
    }

    ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
    if (ipb) {
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
    }
    /* IPB bits in the backlog are merged with the TIMA IPB bits */
    regs[TM_IPB] |= ipb;
    backlog_prio = xive_ipb_to_pipr(regs[TM_IPB]);
    backlog_level = 0;

    first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0);
    if (first_group && regs[TM_LSMFB] < backlog_prio) {
        group_prio = xive2_presenter_backlog_scan(xptr, nvp_blk, nvp_idx,
                                                  first_group, &group_level);
        regs[TM_LSMFB] = group_prio;
        if (regs[TM_LGS] && group_prio < backlog_prio) {
            /* VP can take a group interrupt */
            xive2_presenter_backlog_decr(xptr, nvp_blk, nvp_idx,
                                         group_prio, group_level);
            backlog_prio = group_prio;
            backlog_level = group_level;
        }
    }

    /*
     * Compute the PIPR based on the restored state.
     * It will raise the External interrupt signal if needed.
     */
    xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level);
}

/*
 * Updating the OS CAM line can trigger a resend of interrupt
 */
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam;
    uint32_t qw1w2;
    uint64_t qw1dw1;
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;
    bool do_restore;

    /* First update the thread context */
    switch (size) {
    case 4:
        cam = value;
        qw1w2 = cpu_to_be32(cam);
        memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
        break;
    case 8:
        cam = value >> 32;
        qw1dw1 = cpu_to_be64(value);
        memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8);
        break;
    default:
        g_assert_not_reached();
    }

    xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);

    /* Check the interrupt pending bits */
    if (vo) {
        xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx,
                               do_restore);
    }
}

static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring,
                                      uint32_t *nvp_blk, uint32_t *nvp_idx)
{
    uint32_t w2, cam;

    w2 = xive_tctx_word2(&tctx->regs[ring]);
    switch (ring) {
    case TM_QW1_OS:
        if (!(be32_to_cpu(w2) & TM2_QW1W2_VO)) {
            return -1;
        }
        cam = xive_get_field32(TM2_QW1W2_OS_CAM, w2);
        break;
    case TM_QW2_HV_POOL:
        if (!(be32_to_cpu(w2) & TM2_QW2W2_VP)) {
            return -1;
        }
        cam = xive_get_field32(TM2_QW2W2_POOL_CAM, w2);
        break;
    case TM_QW3_HV_PHYS:
        if (!(be32_to_cpu(w2) & TM2_QW3W2_VT)) {
            return -1;
        }
        cam = xive2_tctx_hw_cam_line(tctx->xptr, tctx);
        break;
    default:
        return -1;
    }
    *nvp_blk = xive2_nvp_blk(cam);
    *nvp_idx = xive2_nvp_idx(cam);
    return 0;
}

static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    uint8_t *regs = &tctx->regs[ring];
    Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr);
    uint8_t old_cppr, backlog_prio, first_group, group_level = 0;
    uint8_t pipr_min, lsmfb_min, ring_min;
    bool group_enabled;
    uint32_t nvp_blk, nvp_idx;
    Xive2Nvp nvp;
    int rc;

    trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
                             regs[TM_IPB], regs[TM_PIPR],
                             cppr, regs[TM_NSR]);

    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }

    old_cppr = regs[TM_CPPR];
    regs[TM_CPPR] = cppr;

    /*
     * Recompute the PIPR based on local pending interrupts. It will
     * be adjusted below if needed in case of pending group interrupts.
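     * (Illustrative: an IPB of 0x28 flags priorities 2 and 4 pending,
     * so the recomputed PIPR is 2, the most favored one.)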
     */
    pipr_min = xive_ipb_to_pipr(regs[TM_IPB]);
    group_enabled = !!regs[TM_LGS];
    lsmfb_min = (group_enabled) ? regs[TM_LSMFB] : 0xff;
    ring_min = ring;

    /* PHYS updates also depend on POOL values */
    if (ring == TM_QW3_HV_PHYS) {
        uint8_t *pregs = &tctx->regs[TM_QW2_HV_POOL];

        /* POOL values only matter if POOL ctx is valid */
        if (pregs[TM_WORD2] & 0x80) {

            uint8_t pool_pipr = xive_ipb_to_pipr(pregs[TM_IPB]);
            uint8_t pool_lsmfb = pregs[TM_LSMFB];

            /*
             * Determine highest priority interrupt and
             * remember which ring has it.
             */
            if (pool_pipr < pipr_min) {
                pipr_min = pool_pipr;
                if (pool_pipr < lsmfb_min) {
                    ring_min = TM_QW2_HV_POOL;
                }
            }

            /* Values needed for group priority calculation */
            if (pregs[TM_LGS] && (pool_lsmfb < lsmfb_min)) {
                group_enabled = true;
                lsmfb_min = pool_lsmfb;
                if (lsmfb_min < pipr_min) {
                    ring_min = TM_QW2_HV_POOL;
                }
            }
        }
    }
    regs[TM_PIPR] = pipr_min;

    rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx);
    if (rc) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid context\n");
        return;
    }

    if (cppr < old_cppr) {
        /*
         * FIXME: check if there's a group interrupt being presented
         * and if the new cppr prevents it. If so, then the group
         * interrupt needs to be re-added to the backlog and
         * re-triggered (see re-trigger END info in the NVGC
         * structure)
         */
    }

    if (group_enabled &&
        lsmfb_min < cppr &&
        lsmfb_min < regs[TM_PIPR]) {
        /*
         * Thread has seen a group interrupt with a higher priority
         * than the new cppr or pending local interrupt. Check the
         * backlog
         */
        if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                          nvp_blk, nvp_idx);
            return;
        }

        if (!xive2_nvp_is_valid(&nvp)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                          nvp_blk, nvp_idx);
            return;
        }

        first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0);
        if (!first_group) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                          nvp_blk, nvp_idx);
            return;
        }

        backlog_prio = xive2_presenter_backlog_scan(tctx->xptr,
                                                    nvp_blk, nvp_idx,
                                                    first_group, &group_level);
        tctx->regs[ring_min + TM_LSMFB] = backlog_prio;
        if (backlog_prio != 0xFF) {
            xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx,
                                         backlog_prio, group_level);
            regs[TM_PIPR] = backlog_prio;
        }
    }
    /* CPPR has changed, check if we need to raise a pending exception */
    xive_tctx_notify(tctx, ring_min, group_level);
}

void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}

static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target)
{
    uint8_t *regs = &tctx->regs[ring];

    regs[TM_T] = target;
}

void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
                            hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tctx_set_target(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */

int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                         Xive2Eas *eas)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

static
int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
}

static
int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
}

int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                         Xive2End *end)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                           Xive2End *end, uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                         Xive2Nvp *nvp)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp);
}

int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                           Xive2Nvp *nvp, uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
}

int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd,
                          uint8_t nvgc_blk, uint32_t nvgc_idx,
                          Xive2Nvgc *nvgc)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
}

int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
                            uint8_t nvgc_blk, uint32_t nvgc_idx,
                            Xive2Nvgc *nvgc)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
}

static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2,
                                uint32_t vp_mask)
{
    return (cam1 & vp_mask) == (cam2 & vp_mask);
}

static uint8_t xive2_get_vp_block_mask(uint32_t nvt_blk, bool crowd)
{
    uint8_t block_mask = 0b1111;

    /* 3 supported crowd sizes: 2, 4, 16 */
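    /* e.g. (illustrative) a crowd of 4 clears the low 2 bits: 0b1100 */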
    if (crowd) {
        uint32_t size = xive_get_vpgroup_size(nvt_blk);

        if (size != 2 && size != 4 && size != 16) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd size of %d",
                                           size);
            return block_mask;
        }
        block_mask &= ~(size - 1);
    }
    return block_mask;
}

static uint32_t xive2_get_vp_index_mask(uint32_t nvt_index, bool cam_ignore)
{
    uint32_t index_mask = 0xFFFFFF; /* 24 bits */

    if (cam_ignore) {
        uint32_t size = xive_get_vpgroup_size(nvt_index);

        if (size < 2) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group size of %d",
                                           size);
            return index_mask;
        }
        index_mask &= ~(size - 1);
    }
    return index_mask;
}

/*
 * The thread context register words are in big-endian format.
 */
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                               uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool crowd, bool cam_ignore,
                               uint32_t logic_serv)
{
    uint32_t cam =   xive2_nvp_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    uint32_t index_mask, vp_mask;
    uint8_t block_mask;

    if (format == 0) {
        /*
         * i=0: Specific NVT notification
         * i=1: VP-group notification (bits ignored at the end of the
         *      NVT identifier)
         */
        block_mask = xive2_get_vp_block_mask(nvt_blk, crowd);
        index_mask = xive2_get_vp_index_mask(nvt_idx, cam_ignore);
        vp_mask = xive2_nvp_cam_line(block_mask, index_mask);

        /* For VP-group notifications, threads with LGS=0 are excluded */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
            !(cam_ignore && tctx->regs[TM_QW3_HV_PHYS + TM_LGS] == 0) &&
            xive2_vp_match_mask(cam,
                                xive2_tctx_hw_cam_line(xptr, tctx),
                                vp_mask)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
            !(cam_ignore && tctx->regs[TM_QW2_HV_POOL + TM_LGS] == 0) &&
            xive2_vp_match_mask(cam,
                                xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2),
                                vp_mask)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            !(cam_ignore && tctx->regs[TM_QW1_OS + TM_LGS] == 0) &&
            xive2_vp_match_mask(cam,
                                xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2),
                                vp_mask)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* FIXME: what if cam_ignore and LGS = 0 ? */
        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority)
{
    /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
    uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
    uint8_t *alt_regs = &tctx->regs[alt_ring];

    /*
     * The xive2_presenter_tctx_match() above tells if there's a match
     * but for VP-group notification, we still need to look at the
     * priority to know if the thread can take the interrupt now or if
     * it is precluded.
     */
    if (priority < alt_regs[TM_PIPR]) {
        return false;
    }
    return true;
}

void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority)
{
    uint8_t *regs = &tctx->regs[ring];

    /*
     * Called by the router during a VP-group notification when the
     * thread matches but can't take the interrupt because it's
     * already running at a more favored priority. It then stores the
     * new interrupt priority in the LSMFB field.
     */
    regs[TM_LSMFB] = priority;
}

static void xive2_router_realize(DeviceState *dev, Error **errp)
{
    Xive2Router *xrtr = XIVE2_ROUTER(dev);

    assert(xrtr->xfb);
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 */
static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
                                       uint32_t end_idx, Xive2End *end,
                                       uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus, but the END trigger
 * message has the same parameters as the function below.
 */
static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
                                    uint32_t end_idx, uint32_t end_data)
{
    Xive2End end;
    uint8_t priority;
    uint8_t format;
    bool found, precluded;
    uint8_t nvx_blk;
    uint32_t nvx_idx;

    /* END cache lookup */
    if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive2_end_is_crowd(&end) && !xive2_end_is_ignore(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid END, 'crowd' bit requires 'ignore' bit\n");
        return;
    }

    if (xive2_end_is_enqueue(&end)) {
        xive2_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive2_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVP notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
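     * (PQ semantics, as in xive_esb_trigger: 00 -> 10 and notify;
     * 10 or 11 -> 11, coalesced; 01 stays off.)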
1399      */
1400     if (!xive2_end_is_notify(&end)) {
1401         /* ESn[Q]=1 : end of notification */
1402         if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
1403                                        &end, END2_W1_ESn)) {
1404             return;
1405         }
1406     }
1407 
1408     /*
1409      * Follows IVPE notification
1410      */
1411     nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
1412     nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);
1413 
1414     found = xive_presenter_notify(xrtr->xfb, format, nvx_blk, nvx_idx,
1415                           xive2_end_is_crowd(&end), xive2_end_is_ignore(&end),
1416                           priority,
1417                           xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7),
1418                           &precluded);
1419 
1420     /* TODO: Auto EOI. */
1421 
1422     if (found) {
1423         return;
1424     }
1425 
1426     /*
1427      * If no matching NVP is dispatched on a HW thread :
1428      * - specific VP: update the NVP structure if backlog is activated
1429      * - VP-group: update the backlog counter for that priority in the NVG
1430      */
1431     if (xive2_end_is_backlog(&end)) {
1432 
1433         if (format == 1) {
1434             qemu_log_mask(LOG_GUEST_ERROR,
1435                           "XIVE: END %x/%x invalid config: F1 & backlog\n",
1436                           end_blk, end_idx);
1437             return;
1438         }
1439 
1440         if (!xive2_end_is_ignore(&end)) {
1441             uint8_t ipb;
1442             Xive2Nvp nvp;
1443 
1444             /* NVP cache lookup */
1445             if (xive2_router_get_nvp(xrtr, nvx_blk, nvx_idx, &nvp)) {
1446                 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
1447                               nvx_blk, nvx_idx);
1448                 return;
1449             }
1450 
1451             if (!xive2_nvp_is_valid(&nvp)) {
1452                 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
1453                               nvx_blk, nvx_idx);
1454                 return;
1455             }
1456 
1457             /*
1458              * Record the IPB in the associated NVP structure for later
1459              * use. The presenter will resend the interrupt when the vCPU
1460              * is dispatched again on a HW thread.
1461              */
            ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
                xive_priority_to_ipb(priority);
            nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
            xive2_router_write_nvp(xrtr, nvx_blk, nvx_idx, &nvp, 2);
        } else {
            Xive2Nvgc nvgc;
            uint32_t backlog;
            bool crowd;

            crowd = xive2_end_is_crowd(&end);

            /*
             * For groups and crowds, the per-priority backlog
             * counters are stored in the NVG/NVC structures
             */
            if (xive2_router_get_nvgc(xrtr, crowd,
                                      nvx_blk, nvx_idx, &nvgc)) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no %s %x/%x\n",
                              crowd ? "NVC" : "NVG", nvx_blk, nvx_idx);
                return;
            }

            if (!xive2_nvgc_is_valid(&nvgc)) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n",
                              nvx_blk, nvx_idx);
                return;
            }

            /*
             * Increment the backlog counter for that priority. We only
             * call broadcast() the first time the counter is incremented.
             * broadcast() sets the LSMFB field in the TIMA of the relevant
             * threads so that they know an interrupt is pending.
             */
            backlog = xive2_nvgc_get_backlog(&nvgc, priority) + 1;
            xive2_nvgc_set_backlog(&nvgc, priority, backlog);
            xive2_router_write_nvgc(xrtr, crowd, nvx_blk, nvx_idx, &nvgc);

            if (backlog == 1) {
                XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb);
                xfc->broadcast(xrtr->xfb, nvx_blk, nvx_idx,
                               xive2_end_is_crowd(&end),
                               xive2_end_is_ignore(&end),
                               priority);

                if (!xive2_end_is_precluded_escalation(&end)) {
                    /*
                     * The interrupt will be picked up when the
                     * matching thread lowers its priority level
                     */
                    return;
                }
            }
        }
    }

do_escalation:
    /*
     * If activated, escalate the notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive2_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive2_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1: end of escalation notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
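    /*
     * The escalation EAS is embedded in the END itself: w4 holds the
     * escalation END block and index, w5 the escalation END data.
     */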
    xive2_router_end_notify(xrtr,
                            xive_get_field32(END2_W4_END_BLOCK,     end.w4),
                            xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
                            xive_get_field32(END2_W5_ESC_END_DATA,  end.w5));
}

void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    Xive2Eas eas;
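    /*
     * A note on the split above, assuming the usual LISN layout:
     * XIVE_EAS_BLOCK() extracts the EAS block from the top nibble of
     * the LISN and XIVE_EAS_INDEX() the index from the low 28 bits.
     */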

    /* EAS cache lookup */
    if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

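        /*
         * ESB trigger state machine, as modelled by xive_esb_trigger():
         * PQ=00 becomes 10 and notifies, PQ=10 or 11 becomes 11 and the
         * event is coalesced, and PQ=01 (off) is left unchanged with no
         * notification.
         */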
        notify = xive_esb_trigger(&pq);

        if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        if (!notify) {
            return;
        }
    }

    if (!xive2_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn);
        return;
    }

    if (xive2_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field64(EAS2_END_BLOCK, eas.w),
                            xive_get_field64(EAS2_END_INDEX, eas.w),
                            xive_get_field64(EAS2_END_DATA,  eas.w));
}

static const Property xive2_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
};

static void xive2_router_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc    = "XIVE2 Router Engine";
    device_class_set_props(dc, xive2_router_properties);
    /* Parent is SysBusDeviceClass. No need to call its realize hook */
    dc->realize = xive2_router_realize;
    xnc->notify = xive2_router_notify;
}

static const TypeInfo xive2_router_info = {
    .name          = TYPE_XIVE2_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .instance_size = sizeof(Xive2Router),
    .class_size    = sizeof(Xive2RouterClass),
    .class_init    = xive2_router_class_init,
    .interfaces    = (const InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};

static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);
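    /*
     * For example, assuming 64K ESB pages (esb_shift == 16), each END
     * owns an even/odd page pair covering 128K of MMIO space, so
     * end_idx is addr >> 17.
     */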

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
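        /*
         * EOI semantics, as modelled by xive_esb_eoi(): PQ=11 moves
         * back to 10 and asks for a resend, PQ=00 or 10 resets to 00,
         * and PQ=01 (off) is left unchanged.
         */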
        ret = xive_esb_eoi(&pq);

        /* TODO: Forward the source event notification for routing */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
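        /*
         * The new PQ value is encoded in bits 9:8 of the page offset;
         * the SET_PQ_00/01/10/11 special-load pages sit 0x100 apart.
         */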
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

static void xive2_end_source_write(void *opaque, hwaddr addr,
                                   uint64_t value, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    bool notify = false;

    /*
     * The block id should be deduced from the store address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case 0 ... 0x3FF:
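        /* Stores to the first 1K of the page act as an END trigger */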
        notify = xive_esb_trigger(&pq);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        /* TODO: can we check StoreEOI availability from the router? */
        notify = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        if (end_esmask == END2_W1_ESe) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x cannot EQ inject on ESe\n",
                           end_blk, end_idx);
            return;
        }
        notify = true;
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n",
                      offset);
        return;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    /* TODO: Forward the source event notification for routing */
    if (notify) {
        ;
    }
}

static const MemoryRegionOps xive2_end_source_ops = {
    .read = xive2_end_source_read,
    .write = xive2_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void xive2_end_source_realize(DeviceState *dev, Error **errp)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages: the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
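    /*
     * For example, assuming 1024 ENDs and 64K ESB pages, the region
     * below spans 1024 * 2 * 64K = 128M of MMIO space.
     */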
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive2_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static const Property xive2_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
                     Xive2Router *),
};

static void xive2_end_source_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc    = "XIVE END Source";
    device_class_set_props(dc, xive2_end_source_properties);
    dc->realize = xive2_end_source_realize;
    dc->user_creatable = false;
}

static const TypeInfo xive2_end_source_info = {
    .name          = TYPE_XIVE2_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(Xive2EndSource),
    .class_init    = xive2_end_source_class_init,
};

static void xive2_register_types(void)
{
    type_register_static(&xive2_router_info);
    type_register_static(&xive2_end_source_info);
}

type_init(xive2_register_types)