/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"

#include <libfdt.h>

#include "pnv_xive2_regs.h"

#undef XIVE2_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {

    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),  16 },
    [VST_ESB]  = { "ESB",  1,                 16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),  16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),  16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc), 16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc), 16 },

    [VST_IC]   = { "IC",   1, /* ? */         16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", 1, /* ? */         16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * fifos of the VC sub-engine in case of overflow.
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - NxC,
     * 3 - INT,
     * 4 - OS-Queue,
     * 5 - Pool-Queue,
     * 6 - Hard-Queue
     */
    [VST_ERQ]  = { "ERQ",  1,                 VC_QUEUE_COUNT },
};
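
/*
 * Sizing example (illustrative, assuming an 8-byte Xive2Eas): a
 * direct EAT backed by a single 64K page holds 64K / 8 = 8K EAS
 * entries. The tables declared with a nominal 1-byte entry size
 * (ESB, IC, SYNC, ERQ) are only bounds-checked against their total
 * size in bytes.
 */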

#define xive2_error(xive, fmt, ...)                                     \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__)

/*
 * TODO: Document block id override
 */
static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];

    if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
        blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
    }

    return blk;
}
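
/*
 * Example: with CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE set and a block
 * id of 4 programmed in CQ_XIVE_CFG, the controller of chip 0 claims
 * block 4 instead of the default block id mirroring its chip id.
 */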

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
        PnvXive2 *xive = &chip10->xive;

        if (pnv_xive2_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for ESB, EAT, ENDT, NVP
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of the
 * same size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                    info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page size differs !?",
                        info->name, idx);
            return 0;
        }
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
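
/*
 * Indirect lookup example (illustrative numbers): with 64K indirect
 * pages (page_shift = 16) and a 32-byte entry, vst_per_page is
 * 65536 / 32 = 2048. Resolving idx = 5000 reads the VSD in slot
 * vsd_idx = 5000 / 2048 = 2 of the indirect table, then indexes
 * entry 5000 % 2048 = 904 within the page it points to.
 */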

static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                    blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    result = address_space_read(&address_space_memory, addr,
                                MEMTXATTRS_UNSPECIFIED, data,
                                info->size);
    if (result != MEMTX_OK) {
        xive2_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                    " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        result = address_space_write(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED, data,
                                     info->size);
    } else {
        result = address_space_write(&address_space_memory,
                                     addr + word_number * 4,
                                     MEMTXATTRS_UNSPECIFIED,
                                     data + word_number * 4, 4);
    }

    if (result != MEMTX_OK) {
        xive2_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
                    " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}
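
/*
 * Example: updating only word 2 of a cached entry calls
 * pnv_xive2_vst_write() with word_number = 2, which stores the 4
 * bytes at addr + 8. XIVE_VST_WORD_ALL pushes the whole entry.
 */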

static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
}

static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
                               word_number);
}

static int pnv_xive2_end_update(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t endc_watch[4];

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] =
            cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_end_cache_load(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint64_t endc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(endc_watch[i]);
    }
}

static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
}

static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
                               word_number);
}

static int pnv_xive2_nvp_update(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t nxc_watch[4];

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] =
            cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_nvp_cache_load(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint64_t nxc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(nxc_watch[i]);
    }
}

static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}

static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE2_GEN1_TIMA_OS;
    }

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
        cfg |= XIVE2_VP_SAVE_RESTORE;
    }

    if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
              xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
        cfg |= XIVE2_THREADID_8BITS;
    }

    return cfg;
}

static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
    uint32_t bit = pir & 0x3f;

    return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}
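
/*
 * Example: a thread of fused core 2 (fc < 8) is governed by
 * TCTXT_EN0 and a thread of fused core 9 by TCTXT_EN1, with
 * (pir & 0x3f) selecting its PPC_BIT() in the 64-bit enable mask.
 */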

static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            if (gen1_tima_os) {
                ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                 nvt_idx, cam_ignore,
                                                 logic_serv);
            } else {
                ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                  nvt_idx, cam_ignore,
                                                  logic_serv);
            }

            /*
             * Save the context and follow on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return false;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;
    }
    return cfg;
}

static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
    return pnv_xive2_block_id(PNV_XIVE2(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips. To identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive2 *xive = PNV_XIVE2(xptr);

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources of the interrupt controller have no knowledge
 * of the XIVE2 chip on which they reside. Encode the block id in the
 * source interrupt number before forwarding the source event
 * notification to the Router. This is required on a multichip system.
 */
static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive2 *xive = PNV_XIVE2(xn);
    uint8_t blk = pnv_xive2_block_id(xive);

    xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}

/*
 * Set Translation Tables
 *
 * TODO: add support for multiple sets
 */
static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
{
    uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
    uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
                             xive->cq_regs[CQ_TAR >> 3]);

    switch (tsel) {
    case CQ_TAR_NVPG:
    case CQ_TAR_ESB:
    case CQ_TAR_END:
        xive->tables[tsel][entry] = val;
        break;
    default:
        xive2_error(xive, "IC: unsupported table %d", tsel);
        return -1;
    }

    if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
        xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
                                              xive->cq_regs[CQ_TAR >> 3],
                                              ++entry);
    }

    return 0;
}
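
/*
 * Programming sequence implemented above (illustrative): firmware
 * points CQ_TAR at a table and entry, optionally with CQ_TAR_AUTOINC,
 * then pushes one 64-bit descriptor per CQ_TDR write:
 *
 *   CQ_TAR <- SETFIELD(CQ_TAR_SELECT, 0, CQ_TAR_ESB) | CQ_TAR_AUTOINC
 *   CQ_TDR <- entry 0, CQ_TDR <- entry 1, ...  (entry auto-increments)
 */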

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                        page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS:  /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}
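
/*
 * ESB window sizing example (illustrative): a direct 64K SBE table
 * covers 64K * SBE_PER_BYTE = 256K sources (2 PQ bits per source),
 * so the IPI ESB window above is resized to 256K per-source ESB
 * windows of (1ull << xsrc->esb_shift) bytes each.
 */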

/*
 * Both the PC and VC sub-engines are configured here, as each uses
 * the Virtual Structure Tables.
 */
static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
                           xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_ERQ) {
        xive2_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for"
                    " %s table", blk, vst_infos[type].name);
        return;
    }

    if (!vst_addr) {
        xive2_error(xive, "VST: invalid %s table address",
                    vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive2_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * MMIO handlers
 */

/*
 * IC BAR layout
 *
 * Page 0: Internal CQ register accesses (reads & writes)
 * Page 1: Internal VC register accesses (reads & writes)
 * Page 2: Internal PC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (reads & writes)
 * Page 4: Notify Port page (writes only, w/data)
 * Page 5: Reserved
 * Page 6: Sync Poll page (writes only, dataless)
 * Page 7: Sync Inject page (writes only, dataless)
 * Page 8: LSI Trigger page (writes only, dataless)
 * Page 9: LSI SB Management page (reads & writes dataless)
 * Pages 10-255: Reserved
 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
 *                covering the 128 threads in P10.
 * Pages 384-511: Reserved
 */
typedef struct PnvXive2Region {
    const char *name;
    uint32_t pgoff;
    uint32_t pgsize;
    const MemoryRegionOps *ops;
} PnvXive2Region;

static const MemoryRegionOps pnv_xive2_ic_cq_ops;
static const MemoryRegionOps pnv_xive2_ic_pc_ops;
static const MemoryRegionOps pnv_xive2_ic_vc_ops;
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
static const MemoryRegionOps pnv_xive2_ic_notify_ops;
static const MemoryRegionOps pnv_xive2_ic_sync_ops;
static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;

/* 512 pages. 4K: 2M range, 64K: 32M range */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",          0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",          1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",          2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",       3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",      4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",        6,   2,   &pnv_xive2_ic_sync_ops   },
    { "xive-ic-lsi",         8,   2,   &pnv_xive2_ic_lsi_ops    },
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
    /* pages 384-511 reserved */
};

/*
 * CQ operations
 */

static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint64_t val = 0;

    switch (offset) {
    case CQ_XIVE_CAP: /* Set at reset */
    case CQ_XIVE_CFG:
        val = xive->cq_regs[reg];
        break;
    case CQ_MSGSND: /* TODO check the #cores of the machine */
        val = 0xffffffff00000000ull;
        break;
    case CQ_CFG_PB_GEN:
        val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
        break;
    default:
        xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
}
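
/*
 * Example: CQ_BAR_RANGE = 0 decodes to the minimum 16M window
 * (1ull << 24); each increment doubles it, so a range of 6 maps a
 * 1G ESB or END region.
 */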

static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->cq_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
        val = xive->vc_regs[reg];
        break;

    /*
     * ESB cache updates (not modeled)
     */
    case VC_ESBC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    case VC_ESBC_CFG:
        val = xive->vc_regs[reg];
        break;

    /*
     * EAS cache updates (not modeled)
     */
    case VC_EASC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_end_cache_load(xive);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL_MASK:
        val = xive->vc_regs[reg];
        break;

    case VC_AT_MACRO_KILL:
        xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Interrupt fifo overflow in memory backing store (not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        val = xive->vc_regs[reg];
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        val = VC_ENDC_SYNC_POLL_DONE;
        break;
    default:
        xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
        pnv_xive2_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        /* ESB update */
        break;

    case VC_ESBC_CFG:
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        /* EAS update */
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive);
        break;

    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt fifo overflow in memory backing store (not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->vc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_nvp_cache_load(xive);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {

    /*
     * VSD table settings. Only taken into account in the VC
     * sub-engine because the Xive2Router model combines both VC and PC
     * sub-engines
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->pc_regs[reg] = val;
        pnv_xive2_nvp_update(xive);
        break;

    /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->pc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
                                        unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0:
    case TCTXT_EN1:
        val = xive->tctxt_regs[reg];
        break;

    case TCTXT_EN0_SET:
    case TCTXT_EN0_RESET:
        val = xive->tctxt_regs[TCTXT_EN0 >> 3];
        break;
    case TCTXT_EN1_SET:
    case TCTXT_EN1_RESET:
        val = xive->tctxt_regs[TCTXT_EN1 >> 3];
        break;
    case TCTXT_CFG:
        val = xive->tctxt_regs[reg];
        break;
    default:
        xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
                                     uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0: /* Physical Thread Enable */
    case TCTXT_EN1: /* Physical Thread Enable (fused core) */
        xive->tctxt_regs[reg] = val;
        break;

    case TCTXT_EN0_SET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
        break;
    case TCTXT_EN1_SET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
        break;
    case TCTXT_EN0_RESET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
        break;
    case TCTXT_EN1_RESET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
        break;
    case TCTXT_CFG:
        xive->tctxt_regs[reg] = val;
        break;
    default:
        xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
        return;
    }
}

static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
    .read = pnv_xive2_ic_tctxt_read,
    .write = pnv_xive2_ic_tctxt_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Redirect XSCOM to MMIO handlers
 */
static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
        break;
    case 0x100 ... 0x1FF:
        val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
        break;
    case 0x200 ... 0x2FF:
        val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
        break;
    case 0x300 ... 0x3FF:
        val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
        break;
    case 0x100 ... 0x1FF:
        pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
        break;
    case 0x200 ... 0x2FF:
        pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
        break;
    case 0x300 ... 0x3FF:
        pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_xscom_ops = {
    .read = pnv_xive2_xscom_read,
    .write = pnv_xive2_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Notify port page. The layout is compatible between 4K and 64K pages:
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   IPI interrupt (NPU)
 *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
 */

static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
                                    uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                    addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                        !!(val & XIVE_TRIGGER_PQ));
}
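
/*
 * Example: a PHB raising source 0x10 of block 2 stores to the notify
 * page a value decoded above as (blk = 2, idx = 0x10) and forwarded
 * as XIVE_EAS(2, 0x10), with XIVE_TRIGGER_PQ telling the router
 * whether the PQ bits were already checked at the source.
 */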

static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
                                      uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* VC: IPI triggers */
    switch (offset) {
    case 0x000 ... 0x7FF:
        /* TODO: check IPI notify sub-page routing */
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    /* VC: HW triggers */
    case 0x800 ... 0xFFF:
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    default:
        xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
    }
}

static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
                                         unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
    .read = pnv_xive2_ic_notify_read,
    .write = pnv_xive2_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
                                   uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
    .read = pnv_xive2_ic_lsi_read,
    .write = pnv_xive2_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Sync MMIO page (write only)
 */
#define PNV_XIVE2_SYNC_IPI      0x000
#define PNV_XIVE2_SYNC_HW       0x080
#define PNV_XIVE2_SYNC_NxC      0x100
#define PNV_XIVE2_SYNC_INT      0x180
#define PNV_XIVE2_SYNC_OS_ESC   0x200
#define PNV_XIVE2_SYNC_POOL_ESC 0x280
#define PNV_XIVE2_SYNC_HARD_ESC 0x300

static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
                                       unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
                                    uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    case PNV_XIVE2_SYNC_IPI:
    case PNV_XIVE2_SYNC_HW:
    case PNV_XIVE2_SYNC_NxC:
    case PNV_XIVE2_SYNC_INT:
    case PNV_XIVE2_SYNC_OS_ESC:
    case PNV_XIVE2_SYNC_POOL_ESC:
    case PNV_XIVE2_SYNC_HARD_ESC:
        break;
    default:
        xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
    .read = pnv_xive2_ic_sync_read,
    .write = pnv_xive2_ic_sync_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * When the TM direct pages of the IC controller are accessed, the
 * target HW thread is deduced from the page offset.
 */
static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset)
{
    /* On P10, the node ID shift in the PIR register is 8 bits */
    return xive->chip->chip_id << 8 | offset >> xive->ic_shift;
}
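
/*
 * Example: on chip 1, an access landing in page 5 of the TM indirect
 * region yields PIR = (1 << 8) | 5 = 0x105, i.e. HW thread 5 of
 * chip 1.
 */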

static uint32_t pnv_xive2_ic_tm_get_hw_page_offset(PnvXive2 *xive,
                                                   hwaddr offset)
{
    /*
     * Indirect TIMA accesses are similar to direct accesses for
     * privilege ring 0. So remove any traces of the hw thread ID from
     * the offset in the IC BAR as it could be interpreted as the ring
     * privilege when calling the underlying direct access functions.
     */
    return offset & ((1ull << xive->ic_shift) - 1);
}

static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
{
    PnvChip *chip = xive->chip;
    PowerPCCPU *cpu = NULL;

    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
                                              unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    hwaddr hw_page_offset;
    uint32_t pir;
    XiveTCTX *tctx;
    uint64_t val = -1;

    pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
    tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    if (tctx) {
        val = xive_tctx_tm_read(xptr, tctx, hw_page_offset, size);
    }

    return val;
}

static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
                                           uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    hwaddr hw_page_offset;
    uint32_t pir;
    XiveTCTX *tctx;

    pir = pnv_xive2_ic_tm_get_pir(xive, offset);
    hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
    tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    if (tctx) {
        xive_tctx_tm_write(xptr, tctx, hw_page_offset, val, size);
    }
}

static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
    .read = pnv_xive2_ic_tm_indirect_read,
    .write = pnv_xive2_ic_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
1692 
1693 /*
1694  * TIMA ops
1695  */
1696 static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
1697                                uint64_t value, unsigned size)
1698 {
1699     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
1700     PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
1701     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1702     XivePresenter *xptr = XIVE_PRESENTER(xive);
1703 
1704     xive_tctx_tm_write(xptr, tctx, offset, value, size);
1705 }
1706 
1707 static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
1708 {
1709     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
1710     PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
1711     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1712     XivePresenter *xptr = XIVE_PRESENTER(xive);
1713 
1714     return xive_tctx_tm_read(xptr, tctx, offset, size);
1715 }
1716 
1717 static const MemoryRegionOps pnv_xive2_tm_ops = {
1718     .read = pnv_xive2_tm_read,
1719     .write = pnv_xive2_tm_write,
1720     .endianness = DEVICE_BIG_ENDIAN,
1721     .valid = {
1722         .min_access_size = 1,
1723         .max_access_size = 8,
1724     },
1725     .impl = {
1726         .min_access_size = 1,
1727         .max_access_size = 8,
1728     },
1729 };
1730 
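/*
 * The NVC and NVPG MMIO regions are not modeled: any access is
 * reported as a guest error, with reads returning all 1s.
 */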
1731 static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
1732                                    unsigned size)
1733 {
1734     PnvXive2 *xive = PNV_XIVE2(opaque);
1735 
1736     xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
1737     return -1;
1738 }
1739 
1740 static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
1741                                 uint64_t val, unsigned size)
1742 {
1743     PnvXive2 *xive = PNV_XIVE2(opaque);
1744 
1745     xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
1746 }
1747 
1748 static const MemoryRegionOps pnv_xive2_nvc_ops = {
1749     .read = pnv_xive2_nvc_read,
1750     .write = pnv_xive2_nvc_write,
1751     .endianness = DEVICE_BIG_ENDIAN,
1752     .valid = {
1753         .min_access_size = 8,
1754         .max_access_size = 8,
1755     },
1756     .impl = {
1757         .min_access_size = 8,
1758         .max_access_size = 8,
1759     },
1760 };
1761 
1762 static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
1763                                     unsigned size)
1764 {
1765     PnvXive2 *xive = PNV_XIVE2(opaque);
1766 
1767     xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
1768     return -1;
1769 }
1770 
1771 static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
1772                                  uint64_t val, unsigned size)
1773 {
1774     PnvXive2 *xive = PNV_XIVE2(opaque);
1775 
1776     xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
1777 }
1778 
1779 static const MemoryRegionOps pnv_xive2_nvpg_ops = {
1780     .read = pnv_xive2_nvpg_read,
1781     .write = pnv_xive2_nvpg_write,
1782     .endianness = DEVICE_BIG_ENDIAN,
1783     .valid = {
1784         .min_access_size = 8,
1785         .max_access_size = 8,
1786     },
1787     .impl = {
1788         .min_access_size = 8,
1789         .max_access_size = 8,
1790     },
1791 };
1792 
1793 /*
1794  * POWER10 default capabilities: 0x2000120076f000fc
1795  */
1796 #define PNV_XIVE2_CAPABILITIES  0x2000120076f000fc
1797 
1798 /*
1799  * POWER10 default configuration: 0x0030000033000000
1800  *
1801  * The 8-bit thread id was dropped for P10
1802  */
1803 #define PNV_XIVE2_CONFIGURATION 0x0030000033000000
1804 
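/*
 * Reset reloads the CAP/CFG registers from the "capabilities" and
 * "config" properties, hardwires the block field to the chip ID,
 * restores the default 64k page shifts and unmaps the source ESB
 * regions until FW configures the tables again.
 */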
1805 static void pnv_xive2_reset(void *dev)
1806 {
1807     PnvXive2 *xive = PNV_XIVE2(dev);
1808     XiveSource *xsrc = &xive->ipi_source;
1809     Xive2EndSource *end_xsrc = &xive->end_source;
1810 
1811     xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
1812     xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;
1813 
1814     /* HW hardwires the chip's topology ID (#Topology) in the block field */
1815     xive->cq_regs[CQ_XIVE_CFG >> 3] |=
1816         SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);
1817 
1818     /* Set default page size to 64k */
1819     xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
1820     xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;
1821 
1822     /* Clear source MMIOs */
1823     if (memory_region_is_mapped(&xsrc->esb_mmio)) {
1824         memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
1825     }
1826 
1827     if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
1828         memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
1829     }
1830 }
1831 
1832 /*
1833  * Maximum number of IRQs and ENDs supported by HW. Will be tuned by
1834  * software.
1835  */
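/*
 * Each source is backed by a pair of 64k ESB pages, i.e. 128k
 * (1ull << XIVE_ESB_64K_2PAGE), so the limits below are simply the
 * MMIO window sizes divided by 128k.
 */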
1836 #define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1837 #define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1838 
1839 static void pnv_xive2_realize(DeviceState *dev, Error **errp)
1840 {
1841     PnvXive2 *xive = PNV_XIVE2(dev);
1842     PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
1843     XiveSource *xsrc = &xive->ipi_source;
1844     Xive2EndSource *end_xsrc = &xive->end_source;
1845     Error *local_err = NULL;
1846     int i;
1847 
1848     pxc->parent_realize(dev, &local_err);
1849     if (local_err) {
1850         error_propagate(errp, local_err);
1851         return;
1852     }
1853 
1854     assert(xive->chip);
1855 
1856     /*
1857      * The XiveSource and Xive2EndSource objects are realized with the
1858      * maximum allowed HW configuration. The ESB MMIO regions will be
1859      * resized dynamically when the controller is configured by the FW
1860      * to limit accesses to resources not provisioned.
1861      */
1862     object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
1863                             &error_fatal);
1864     object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
1865                             &error_fatal);
1866     object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
1867                              &error_fatal);
1868     qdev_realize(DEVICE(xsrc), NULL, &local_err);
1869     if (local_err) {
1870         error_propagate(errp, local_err);
1871         return;
1872     }
1873 
1874     object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
1875                             &error_fatal);
1876     object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
1877                              &error_abort);
1878     qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
1879     if (local_err) {
1880         error_propagate(errp, local_err);
1881         return;
1882     }
1883 
1884     /* XSCOM region, used for initial configuration of the BARs */
1885     memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
1886                           &pnv_xive2_xscom_ops, xive, "xscom-xive",
1887                           PNV10_XSCOM_XIVE2_SIZE << 3);
1888 
1889     /* Interrupt controller MMIO regions */
1890     xive->ic_shift = 16;
1891     memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
1892                        PNV10_XIVE2_IC_SIZE);
1893 
1894     for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
1895         memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
1896                          pnv_xive2_ic_regions[i].ops, xive,
1897                          pnv_xive2_ic_regions[i].name,
1898                          pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
1899     }
1900 
1901     /*
1902      * VC MMIO regions.
1903      */
1904     xive->esb_shift = 16;
1905     xive->end_shift = 16;
1906     memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
1907                        PNV10_XIVE2_ESB_SIZE);
1908     memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
1909                        PNV10_XIVE2_END_SIZE);
1910 
1911     /* Presenter Controller MMIO region (not modeled) */
1912     xive->nvc_shift = 16;
1913     xive->nvpg_shift = 16;
1914     memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
1915                           &pnv_xive2_nvc_ops, xive,
1916                           "xive-nvc", PNV10_XIVE2_NVC_SIZE);
1917 
1918     memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
1919                           &pnv_xive2_nvpg_ops, xive,
1920                           "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);
1921 
1922     /* Thread Interrupt Management Area (Direct) */
1923     xive->tm_shift = 16;
1924     memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
1925                           xive, "xive-tima", PNV10_XIVE2_TM_SIZE);
1926 
1927     qemu_register_reset(pnv_xive2_reset, dev);
1928 }
1929 
1930 static Property pnv_xive2_properties[] = {
1931     DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
1932     DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
1933     DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
1934     DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
1935     DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
1936     DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
1937     DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
1938                        PNV_XIVE2_CAPABILITIES),
1939     DEFINE_PROP_UINT64("config", PnvXive2, config,
1940                        PNV_XIVE2_CONFIGURATION),
1941     DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
1942     DEFINE_PROP_END_OF_LIST(),
1943 };
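
/*
 * The BAR properties default to 0; they are expected to be assigned
 * by the chip model before the device is realized.
 */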
1944 
1945 static void pnv_xive2_instance_init(Object *obj)
1946 {
1947     PnvXive2 *xive = PNV_XIVE2(obj);
1948 
1949     object_initialize_child(obj, "ipi_source", &xive->ipi_source,
1950                             TYPE_XIVE_SOURCE);
1951     object_initialize_child(obj, "end_source", &xive->end_source,
1952                             TYPE_XIVE2_END_SOURCE);
1953 }
1954 
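/*
 * Expose the XIVE2 XSCOM range in the device tree; FW (skiboot) is
 * expected to look the node up by its "ibm,power10-xive-x" compatible
 * string.
 */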
1955 static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
1956                               int xscom_offset)
1957 {
1958     const char compat_p10[] = "ibm,power10-xive-x";
1959     char *name;
1960     int offset;
1961     uint32_t reg[] = {
1962         cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
1963         cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
1964     };
1965 
1966     name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
1967     offset = fdt_add_subnode(fdt, xscom_offset, name);
1968     _FDT(offset);
1969     g_free(name);
1970 
1971     _FDT(fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)));
1972     _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
1973                      sizeof(compat_p10)));
1974     return 0;
1975 }
1976 
1977 static void pnv_xive2_class_init(ObjectClass *klass, void *data)
1978 {
1979     DeviceClass *dc = DEVICE_CLASS(klass);
1980     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
1981     Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
1982     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
1983     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
1984     PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);
1985 
1986     xdc->dt_xscom  = pnv_xive2_dt_xscom;
1987 
1988     dc->desc       = "PowerNV XIVE2 Interrupt Controller (POWER10)";
1989     device_class_set_parent_realize(dc, pnv_xive2_realize,
1990                                     &pxc->parent_realize);
1991     device_class_set_props(dc, pnv_xive2_properties);
1992 
1993     xrc->get_eas   = pnv_xive2_get_eas;
1994     xrc->get_pq    = pnv_xive2_get_pq;
1995     xrc->set_pq    = pnv_xive2_set_pq;
1996     xrc->get_end   = pnv_xive2_get_end;
1997     xrc->write_end = pnv_xive2_write_end;
1998     xrc->get_nvp   = pnv_xive2_get_nvp;
1999     xrc->write_nvp = pnv_xive2_write_nvp;
2000     xrc->get_config  = pnv_xive2_get_config;
2001     xrc->get_block_id = pnv_xive2_get_block_id;
2002 
2003     xnc->notify    = pnv_xive2_notify;
2004 
2005     xpc->match_nvt  = pnv_xive2_match_nvt;
2006     xpc->get_config = pnv_xive2_presenter_get_config;
2007 }
2008 
2009 static const TypeInfo pnv_xive2_info = {
2010     .name          = TYPE_PNV_XIVE2,
2011     .parent        = TYPE_XIVE2_ROUTER,
2012     .instance_init = pnv_xive2_instance_init,
2013     .instance_size = sizeof(PnvXive2),
2014     .class_init    = pnv_xive2_class_init,
2015     .class_size    = sizeof(PnvXive2Class),
2016     .interfaces    = (InterfaceInfo[]) {
2017         { TYPE_PNV_XSCOM_INTERFACE },
2018         { }
2019     }
2020 };
2021 
2022 static void pnv_xive2_register_types(void)
2023 {
2024     type_register_static(&pnv_xive2_info);
2025 }
2026 
2027 type_init(pnv_xive2_register_types)
2028 
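/*
 * Example of a line printed below (illustrative values only):
 *
 *   00000030 end:00/0005 IPB:80 CPPR:ff CO:0004
 */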
2029 static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
2030                                      Monitor *mon)
2031 {
2032     uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
2033     uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
2034 
2035     if (!xive2_nvp_is_valid(nvp)) {
2036         return;
2037     }
2038 
2039     monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x",
2040                    nvp_idx, eq_blk, eq_idx,
2041                    xive_get_field32(NVP2_W2_IPB, nvp->w2));
2042     /*
2043      * When the NVP is HW controlled, more fields are updated
2044      */
2045     if (xive2_nvp_is_hw(nvp)) {
2046         monitor_printf(mon, " CPPR:%02x",
2047                        xive_get_field32(NVP2_W2_CPPR, nvp->w2));
2048         if (xive2_nvp_is_co(nvp)) {
2049             monitor_printf(mon, " CO:%04x",
2050                            xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
2051         }
2052     }
2053     monitor_printf(mon, "\n");
2054 }
2055 
2056 /*
2057  * If the table is direct, we can compute the number of PQ entries
2058  * provisioned by FW.
2059  */
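/*
 * For instance, a direct ESB table with VSD_TSIZE = 4 describes
 * 1ull << (4 + 12) = 64k of backing store, i.e. 64k * SBE_PER_BYTE =
 * 256k PQ entries.
 */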
2060 static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
2061 {
2062     uint8_t blk = pnv_xive2_block_id(xive);
2063     uint64_t vsd = xive->vsds[VST_ESB][blk];
2064     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
2065 
2066     return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
2067 }
2068 
2069 /*
2070  * Compute the number of entries per indirect subpage.
2071  */
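/*
 * e.g. an indirect NVPT with 64k subpages (page_shift = 16) holds
 * 64k / sizeof(Xive2Nvp) = 2k NVP entries per subpage, assuming the
 * 32-byte Xive2Nvp.
 */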
2072 static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
2073 {
2074     uint8_t blk = pnv_xive2_block_id(xive);
2075     uint64_t vsd = xive->vsds[type][blk];
2076     const XiveVstInfo *info = &vst_infos[type];
2077     uint64_t vsd_addr;
2078     uint32_t page_shift;
2079 
2080     /* For direct tables, fake a valid value */
2081     if (!(VSD_INDIRECT & vsd)) {
2082         return 1;
2083     }
2084 
2085     /* Get the page size of the indirect table. */
2086     vsd_addr = vsd & VSD_ADDRESS_MASK;
2087     ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
2088 
2089     if (!(vsd & VSD_ADDRESS_MASK)) {
2090 #ifdef XIVE2_DEBUG
2091         xive2_error(xive, "VST: invalid %s entry!?", info->name);
2092 #endif
2093         return 0;
2094     }
2095 
2096     page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
2097 
2098     if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
2099         xive2_error(xive, "VST: invalid %s page shift %d", info->name,
2100                    page_shift);
2101         return 0;
2102     }
2103 
2104     return (1ull << page_shift) / info->size;
2105 }
2106 
2107 void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon)
2108 {
2109     Xive2Router *xrtr = XIVE2_ROUTER(xive);
2110     uint8_t blk = pnv_xive2_block_id(xive);
2111     uint8_t chip_id = xive->chip->chip_id;
2112     uint32_t srcno0 = XIVE_EAS(blk, 0);
2113     uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
2114     Xive2Eas eas;
2115     Xive2End end;
2116     Xive2Nvp nvp;
2117     int i;
2118     uint64_t xive_nvp_per_subpage;
2119 
2120     monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
2121                    srcno0 + nr_esbs - 1);
2122     xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);
2123 
2124     monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
2125                    srcno0 + nr_esbs - 1);
2126     for (i = 0; i < nr_esbs; i++) {
2127         if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
2128             break;
2129         }
2130         if (!xive2_eas_is_masked(&eas)) {
2131             xive2_eas_pic_print_info(&eas, i, mon);
2132         }
2133     }
2134 
2135     monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
2136     i = 0;
2137     while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2138         xive2_end_eas_pic_print_info(&end, i++, mon);
2139     }
2140 
2141     monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
2142     i = 0;
2143     while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2144         xive2_end_pic_print_info(&end, i++, mon);
2145     }
2146 
2147     monitor_printf(mon, "XIVE[%x] #%d NVPT %08x .. %08x\n", chip_id, blk,
2148                    0, XIVE2_NVP_COUNT - 1);
2149     xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
2150     for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
2151         while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
2152             xive2_nvp_pic_print_info(&nvp, i++, mon);
2153         }
2154     }
2155 }
2156