/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"

#include <libfdt.h>

#include "pnv_xive2_regs.h"

#undef XIVE2_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {

    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),  16 },
    [VST_ESB]  = { "ESB",  1,                 16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),  16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),  16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc), 16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc), 16 },

    [VST_IC]   = { "IC",   1 /* ? */,         16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", 1 /* ? */,         16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * fifos of the VC sub-engine in case of overflow.
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - NxC,
     * 3 - INT,
     * 4 - OS-Queue,
     * 5 - Pool-Queue,
     * 6 - Hard-Queue
     */
    [VST_ERQ]  = { "ERQ",  1,                 VC_QUEUE_COUNT },
};

#define xive2_error(xive, fmt, ...)                                     \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
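
/*
 * Illustrative example (not used by the model): with a mask of
 * 0xf0, ctz64(mask) is 4, so for word = 0x0123:
 *
 *   GETFIELD(0xf0, 0x0123)      -> 0x2
 *   SETFIELD(0xf0, 0x0123, 0x7) -> 0x0173
 *
 * i.e. SETFIELD() deposits a value in place of the masked field,
 * which is how the accessors below carve bit fields out of the
 * 64-bit XIVE registers.
 */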

/*
 * TODO: Document block id override
 */
static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];

    if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
        blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
        PnvXive2 *xive = &chip10->xive;

        if (pnv_xive2_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for ESB, EAT, ENDT, NVP
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of the
 * same size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                    info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page size differs !?",
                        info->name, idx);
            return 0;
        }
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
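
/*
 * Worked example (illustrative): for an indirect ENDT with 64K pages
 * (page_shift = 16) and assuming sizeof(Xive2End) = 32 bytes,
 * vst_per_page = 0x10000 / 32 = 2048. Looking up END index 5000:
 *
 *   vsd_idx = 5000 / 2048 = 2   -> third VSD of the indirect array
 *   idx % vst_per_page = 904    -> entry 904 within that direct page
 *
 * Only the two divisions above matter; the sizeof() value is an
 * assumption for the sake of the example.
 */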

static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                    blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
}

static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
                               word_number);
}

static int pnv_xive2_end_update(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t endc_watch[4];

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] =
            cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_end_cache_load(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
                            xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
    uint64_t endc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(endc_watch[i]);
    }
}
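
/*
 * For reference, the cache watch sequence as driven by firmware looks
 * roughly like this (a sketch, not an exact skiboot excerpt):
 *
 *   write VC_ENDC_WATCH0_SPEC       -> select END block/index
 *   read  VC_ENDC_WATCH0_DATA0      -> loads DATA0..DATA3 from the VST
 *   write VC_ENDC_WATCH0_DATA1..3   -> stage the modified words
 *   write VC_ENDC_WATCH0_DATA0      -> commits the entry to memory
 *
 * pnv_xive2_end_cache_load() models the DATA0 read step and
 * pnv_xive2_end_update() models the final commit.
 */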

static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
}

static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
                               word_number);
}

static int pnv_xive2_nvp_update(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    int i;
    uint64_t nxc_watch[4];

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] =
            cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]);
    }

    return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_nvp_cache_load(PnvXive2 *xive)
{
    uint8_t  blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
                            xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
    uint64_t nxc_watch[4] = { 0 };
    int i;

    if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] =
            be64_to_cpu(nxc_watch[i]);
    }
}

static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}

static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
    uint32_t bit = pir & 0x3f;

    return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}
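
/*
 * Example (illustrative): the 128 P10 HW threads are split over the
 * two enablement registers, 64 bits each. Assuming
 * PNV10_PIR2FUSEDCORE() yields ((pir) >> 3) & 0xf, a thread with
 * PIR 0x45 maps to fused core 8, hence TCTXT_EN1, and its enable bit
 * is PPC_BIT(0x45 & 0x3f) = PPC_BIT(5) of that register.
 */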

static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                              nvt_idx, cam_ignore,
                                              logic_serv);

            /*
             * Save the context and carry on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return false;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
    return pnv_xive2_block_id(PNV_XIVE2(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips. To identify the chip
 * from which the access is being done, we extract the chip id from
 * the PIR.
 */
static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive2 *xive = PNV_XIVE2(xptr);

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources of the interrupt controller have no knowledge
 * of the XIVE2 chip on which they reside. Encode the block id in the
 * source interrupt number before forwarding the source event
 * notification to the Router. This is required on a multichip system.
 */
static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive2 *xive = PNV_XIVE2(xn);
    uint8_t blk = pnv_xive2_block_id(xive);

    xive2_router_notify(xn, XIVE_EAS(blk, srcno));
}
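
/*
 * Example (illustrative): XIVE_EAS() packs the block id in the top
 * nibble of the 32-bit EAS number. On the chip with block id 2,
 * source number 0x10 is forwarded as XIVE_EAS(2, 0x10) = 0x20000010,
 * which pnv_xive2_ic_hw_trigger() below decodes back with
 * XIVE_EAS_BLOCK()/XIVE_EAS_INDEX().
 */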

/*
 * Set Translation Tables
 *
 * TODO add support for multiple sets
 */
static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
{
    uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
    uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
                             xive->cq_regs[CQ_TAR >> 3]);

    switch (tsel) {
    case CQ_TAR_NVPG:
    case CQ_TAR_ESB:
    case CQ_TAR_END:
        xive->tables[tsel][entry] = val;
        break;
    default:
        xive2_error(xive, "IC: unsupported table %d", tsel);
        return -1;
    }

    if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
        xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
                                              xive->cq_regs[CQ_TAR >> 3],
                                              ++entry);
    }

    return 0;
}
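
/*
 * Typical firmware usage (a sketch of the sequence, not a skiboot
 * excerpt): select a table and entry 0 with auto-increment in CQ_TAR,
 * then feed successive entries through CQ_TDR:
 *
 *   write CQ_TAR = CQ_TAR_AUTOINC | SETFIELD(CQ_TAR_SELECT, 0, CQ_TAR_ESB)
 *   write CQ_TDR = entry0      (ENTRY_SELECT bumps to 1)
 *   write CQ_TDR = entry1      (ENTRY_SELECT bumps to 2)
 *   ...
 */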

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                        page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS:  /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}
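
/*
 * Sizing example (illustrative, assuming the default 2-page 64K ESB
 * windows, i.e. esb_shift = XIVE_ESB_64K_2PAGE = 17): a direct ESB
 * table of 64K (VSD_TSIZE = 4) backs 64K * SBE_PER_BYTE = 256K
 * sources, so the window above is resized to 256K * 128K = 32G.
 */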

/*
 * Both the PC and VC sub-engines are configured here, as each uses
 * the Virtual Structure Tables
 */
static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
                           xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_ERQ) {
        xive2_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for"
                    " %s table", blk, vst_infos[type].name);
        return;
    }

    if (!vst_addr) {
        xive2_error(xive, "VST: invalid %s table address",
                    vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive2_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * MMIO handlers
 */

/*
 * IC BAR layout
 *
 * Page 0: Internal CQ register accesses (reads & writes)
 * Page 1: Internal VC register accesses (reads & writes)
 * Page 2: Internal PC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (reads & writes)
 * Page 4: Notify Port page (writes only, w/data)
 * Page 5: Reserved
 * Page 6: Sync Poll page (writes only, dataless)
 * Page 7: Sync Inject page (writes only, dataless)
 * Page 8: LSI Trigger page (writes only, dataless)
 * Page 9: LSI SB Management page (reads & writes, dataless)
 * Pages 10-255: Reserved
 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
 *                covering the 128 threads in P10.
 * Pages 384-511: Reserved
 */
typedef struct PnvXive2Region {
    const char *name;
    uint32_t pgoff;
    uint32_t pgsize;
    const MemoryRegionOps *ops;
} PnvXive2Region;

static const MemoryRegionOps pnv_xive2_ic_cq_ops;
static const MemoryRegionOps pnv_xive2_ic_pc_ops;
static const MemoryRegionOps pnv_xive2_ic_vc_ops;
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
static const MemoryRegionOps pnv_xive2_ic_notify_ops;
static const MemoryRegionOps pnv_xive2_ic_sync_ops;
static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;

/* 512 pages. 4K: 2M range, 64K: 32M range */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",        0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",        1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",        2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",     3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",    4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",      6,   2,   &pnv_xive2_ic_sync_ops   },
    { "xive-ic-lsi",       8,   2,   &pnv_xive2_ic_lsi_ops    },
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
    /* pages 384-511 reserved */
};
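
/*
 * Example (illustrative): with 64K IC pages (ic_shift = 16), the sync
 * region above is mapped at page offset 6 << 16 = 0x60000 in the IC
 * BAR and spans 2 pages (the poll and inject pages); the LSI region
 * follows at 8 << 16 = 0x80000.
 */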

/*
 * CQ operations
 */

static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint64_t val = 0;

    switch (offset) {
    case CQ_XIVE_CAP: /* Set at reset */
    case CQ_XIVE_CFG:
        val = xive->cq_regs[reg];
        break;
    case CQ_MSGSND: /* TODO check the #cores of the machine */
        val = 0xffffffff00000000;
        break;
    case CQ_CFG_PB_GEN:
        val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
        break;
    default:
        xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
}
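
/*
 * Example (illustrative): CQ_BAR_RANGE encodes the BAR size as a
 * power of two starting at 16M, so a range field of 0 yields
 * 1 << 24 = 16M and a range field of 8 yields 1 << 32 = 4G.
 */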

static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->cq_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
        val = xive->vc_regs[reg];
        break;

    /*
     * ESB cache updates (not modeled)
     */
    case VC_ESBC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * EAS cache updates (not modeled)
     */
    case VC_EASC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_end_cache_load(xive);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL_MASK:
        val = xive->vc_regs[reg];
        break;

    case VC_AT_MACRO_KILL:
        xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        val = xive->vc_regs[reg];
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        val = VC_ENDC_SYNC_POLL_DONE;
        break;
    default:
        xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
        pnv_xive2_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        /* ESB update */
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        /* EAS update */
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive);
        break;

    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->vc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_nvp_cache_load(xive);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {

    /*
     * VSD table settings. Only taken into account in the VC
     * sub-engine because the Xive2Router model combines both VC and PC
     * sub-engines
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        xive->pc_regs[reg] = val;
        pnv_xive2_nvp_update(xive);
        break;

    /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->pc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
                                        unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0:
    case TCTXT_EN1:
        val = xive->tctxt_regs[reg];
        break;

    case TCTXT_EN0_SET:
    case TCTXT_EN0_RESET:
        val = xive->tctxt_regs[TCTXT_EN0 >> 3];
        break;
    case TCTXT_EN1_SET:
    case TCTXT_EN1_RESET:
        val = xive->tctxt_regs[TCTXT_EN1 >> 3];
        break;
    default:
        xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
                                     uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0: /* Physical Thread Enable */
    case TCTXT_EN1: /* Physical Thread Enable (fused core) */
        xive->tctxt_regs[reg] = val;
        break;

    case TCTXT_EN0_SET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
        break;
    case TCTXT_EN1_SET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
        break;
    case TCTXT_EN0_RESET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
        break;
    case TCTXT_EN1_RESET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
        break;

    default:
        xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
        return;
    }
}

static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
    .read = pnv_xive2_ic_tctxt_read,
    .write = pnv_xive2_ic_tctxt_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Redirect XSCOM to MMIO handlers
 */
static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
        break;
    case 0x100 ... 0x1FF:
        val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
        break;
    case 0x200 ... 0x2FF:
        val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
        break;
    case 0x300 ... 0x3FF:
        val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
        break;
    case 0x100 ... 0x1FF:
        pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
        break;
    case 0x200 ... 0x2FF:
        pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
        break;
    case 0x300 ... 0x3FF:
        pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_xscom_ops = {
    .read = pnv_xive2_xscom_read,
    .write = pnv_xive2_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
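
/*
 * Example (illustrative): each 64-bit XSCOM register maps to one
 * 8-byte MMIO doubleword of the matching IC page. An access to XSCOM
 * register 0x143 falls in the 0x100-0x1FF range, so it is redirected
 * to the VC handler at MMIO offset (0x43 << 3) = 0x218.
 */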

/*
 * Notify port page. The layout is compatible between 4K and 64K pages:
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   IPI interrupt (NPU)
 *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
 */

static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
                                    uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                    addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
}
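
/*
 * Example (illustrative): a PHB triggering its source 0x8 on the chip
 * with block id 1 writes 0x10000008 to the notify port; the decode
 * above recovers blk = 1 and idx = 0x8 before presenting the event to
 * the Router.
 */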

static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
                                      uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* VC: IPI triggers */
    switch (offset) {
    case 0x000 ... 0x7FF:
        /* TODO: check IPI notify sub-page routing */
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    /* VC: HW triggers */
    case 0x800 ... 0xFFF:
        pnv_xive2_ic_hw_trigger(opaque, offset, val);
        break;

    default:
        xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
    }
}

static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
                                         unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
    .read = pnv_xive2_ic_notify_read,
    .write = pnv_xive2_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
                                   uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
    .read = pnv_xive2_ic_lsi_read,
    .write = pnv_xive2_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Sync MMIO page (write only)
 */
#define PNV_XIVE2_SYNC_IPI      0x000
#define PNV_XIVE2_SYNC_HW       0x080
#define PNV_XIVE2_SYNC_NxC      0x100
#define PNV_XIVE2_SYNC_INT      0x180
#define PNV_XIVE2_SYNC_OS_ESC   0x200
#define PNV_XIVE2_SYNC_POOL_ESC 0x280
#define PNV_XIVE2_SYNC_HARD_ESC 0x300

static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
                                       unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
                                    uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    case PNV_XIVE2_SYNC_IPI:
    case PNV_XIVE2_SYNC_HW:
    case PNV_XIVE2_SYNC_NxC:
    case PNV_XIVE2_SYNC_INT:
    case PNV_XIVE2_SYNC_OS_ESC:
    case PNV_XIVE2_SYNC_POOL_ESC:
    case PNV_XIVE2_SYNC_HARD_ESC:
        break;
    default:
        xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
    }
}

static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
    .read = pnv_xive2_ic_sync_read,
    .write = pnv_xive2_ic_sync_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * When the TM direct pages of the IC controller are accessed, the
 * target HW thread is deduced from the page offset.
 */
static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
{
    PnvChip *chip = xive->chip;
    PowerPCCPU *cpu = NULL;

    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
                                              unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir = offset >> xive->ic_shift;
    XiveTCTX *tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    uint64_t val = -1;

    if (tctx) {
        val = xive_tctx_tm_read(NULL, tctx, offset, size);
    }

    return val;
}

static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
                                           uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir = offset >> xive->ic_shift;
    XiveTCTX *tctx = pnv_xive2_get_indirect_tctx(xive, pir);

    if (tctx) {
        xive_tctx_tm_write(NULL, tctx, offset, val, size);
    }
}

static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
    .read = pnv_xive2_ic_tm_indirect_read,
    .write = pnv_xive2_ic_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * TIMA ops
 */

static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
                               uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    /* Other TM ops are the same as XIVE1 */
    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    /* Other TM ops are the same as XIVE1 */
    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

static const MemoryRegionOps pnv_xive2_tm_ops = {
    .read = pnv_xive2_tm_read,
    .write = pnv_xive2_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
                                   unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
                                uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvc_ops = {
    .read = pnv_xive2_nvc_read,
    .write = pnv_xive2_nvc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
                                    unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
                                 uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
}

static const MemoryRegionOps pnv_xive2_nvpg_ops = {
    .read = pnv_xive2_nvpg_read,
    .write = pnv_xive2_nvpg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * POWER10 default capabilities: 0x2000120076f00000
 */
#define PNV_XIVE2_CAPABILITIES  0x2000120076f00000

/*
 * POWER10 default configuration: 0x0030000033000000
 *
 * The 8-bit thread id option was dropped for P10.
 */
#define PNV_XIVE2_CONFIGURATION 0x0030000033000000

static void pnv_xive2_reset(void *dev)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;

    xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
    xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;

    /* HW hardwires the #Topology of the chip in the block field */
    xive->cq_regs[CQ_XIVE_CFG >> 3] |=
        SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);

    /* Set default page size to 64k */
    xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
    xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;

    /* Clear source MMIOs */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
    }
}

/*
 * Maximum number of IRQs and ENDs supported by HW. Will be tuned by
 * software.
 */
#define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))

static void pnv_xive2_realize(DeviceState *dev, Error **errp)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    int i;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and Xive2EndSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW,
     * to limit accesses to resources that have not been provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
                            &error_fatal);
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
                             &error_fatal);
    qdev_realize(DEVICE(xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
                          &pnv_xive2_xscom_ops, xive, "xscom-xive",
                          PNV10_XSCOM_XIVE2_SIZE << 3);

    /* Interrupt controller MMIO regions */
    xive->ic_shift = 16;
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV10_XIVE2_IC_SIZE);

    for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
        memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
                              pnv_xive2_ic_regions[i].ops, xive,
                              pnv_xive2_ic_regions[i].name,
                              pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
    }

    /*
     * VC MMIO regions.
     */
    xive->esb_shift = 16;
    xive->end_shift = 16;
    memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
                       PNV10_XIVE2_ESB_SIZE);
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
                       PNV10_XIVE2_END_SIZE);

    /* Presenter Controller MMIO region (not modeled) */
    xive->nvc_shift = 16;
    xive->nvpg_shift = 16;
    memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
                          &pnv_xive2_nvc_ops, xive,
                          "xive-nvc", PNV10_XIVE2_NVC_SIZE);

    memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
                          &pnv_xive2_nvpg_ops, xive,
                          "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    xive->tm_shift = 16;
    memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
                          xive, "xive-tima", PNV10_XIVE2_TM_SIZE);

    qemu_register_reset(pnv_xive2_reset, dev);
}

static Property pnv_xive2_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
    DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
    DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
    DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
    DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
    DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
                       PNV_XIVE2_CAPABILITIES),
    DEFINE_PROP_UINT64("config", PnvXive2, config,
                       PNV_XIVE2_CONFIGURATION),
    DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};
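
/*
 * A wiring sketch (illustrative only; the real BAR values and the
 * chip object are owned by the PowerNV chip model, and "ic_base",
 * "tm_base" and "chip" below are placeholders):
 *
 *   object_property_set_int(OBJECT(xive), "ic-bar", ic_base,
 *                           &error_fatal);
 *   object_property_set_int(OBJECT(xive), "tm-bar", tm_base,
 *                           &error_fatal);
 *   object_property_set_link(OBJECT(xive), "chip", OBJECT(chip),
 *                            &error_abort);
 *   qdev_realize(DEVICE(xive), NULL, &error_fatal);
 */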

static void pnv_xive2_instance_init(Object *obj)
{
    PnvXive2 *xive = PNV_XIVE2(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE2_END_SOURCE);
}

static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
                              int xscom_offset)
{
    const char compat_p10[] = "ibm,power10-xive-x";
    char *name;
    int offset;
    uint32_t reg[] = {
        cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
        cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
    };

    name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT(fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)));
    _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
                     sizeof(compat_p10)));
    return 0;
}
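
/*
 * The resulting device tree fragment, with the addresses shown
 * symbolically (the actual values come from the macros above):
 *
 *   xive@<PNV10_XSCOM_XIVE2_BASE> {
 *       reg = <PNV10_XSCOM_XIVE2_BASE PNV10_XSCOM_XIVE2_SIZE>;
 *       compatible = "ibm,power10-xive-x";
 *   };
 */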

static void pnv_xive2_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);

    xdc->dt_xscom  = pnv_xive2_dt_xscom;

    dc->desc       = "PowerNV XIVE2 Interrupt Controller (POWER10)";
    device_class_set_parent_realize(dc, pnv_xive2_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive2_properties);

    xrc->get_eas   = pnv_xive2_get_eas;
    xrc->get_end   = pnv_xive2_get_end;
    xrc->write_end = pnv_xive2_write_end;
    xrc->get_nvp   = pnv_xive2_get_nvp;
    xrc->write_nvp = pnv_xive2_write_nvp;
    xrc->get_block_id = pnv_xive2_get_block_id;

    xnc->notify    = pnv_xive2_notify;

    xpc->match_nvt  = pnv_xive2_match_nvt;
}

static const TypeInfo pnv_xive2_info = {
    .name          = TYPE_PNV_XIVE2,
    .parent        = TYPE_XIVE2_ROUTER,
    .instance_init = pnv_xive2_instance_init,
    .instance_size = sizeof(PnvXive2),
    .class_init    = pnv_xive2_class_init,
    .class_size    = sizeof(PnvXive2Class),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive2_register_types(void)
{
    type_register_static(&pnv_xive2_info);
}

type_init(pnv_xive2_register_types)

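/*
 * Monitor (HMP) helpers. A line emitted by xive2_nvp_pic_print_info()
 * below looks like this (illustrative values):
 *
 *   00000042 end:00/0005 IPB:80
 */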
static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
                                     Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n",
                   nvp_idx, eq_blk, eq_idx,
                   xive_get_field32(NVP2_W2_IPB, nvp->w2));
}

/*
 * If the table is direct, we can compute the number of PQ entries
 * provisioned by FW.
 */
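/*
 * Worked example, assuming a direct table: each byte of the ESB
 * backing store holds SBE_PER_BYTE == 4 PQ entries (2 bits each), so
 * a VSD_TSIZE of 4 describes a 2^(4+12) = 64K table, i.e. 256K ESB
 * entries.
 */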
static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[VST_ESB][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return (vsd & VSD_INDIRECT) ? 0 : vst_tsize * SBE_PER_BYTE;
}

/*
 * Compute the number of entries per indirect subpage.
 */
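/*
 * Worked example: with a 64K indirect subpage (page_shift == 16) and
 * the 8-word (32-byte) END entries of this model, a subpage holds
 * 65536 / 32 = 2048 entries.
 */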
static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(vsd & VSD_INDIRECT)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: invalid %s entry!?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

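/*
 * Dump the controller state. On a powernv machine this is typically
 * reached from the HMP monitor with "info pic".
 */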
void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xive);
    uint8_t blk = pnv_xive2_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
    Xive2Eas eas;
    Xive2End end;
    Xive2Nvp nvp;
    int i;
    uint64_t xive_nvp_per_subpage;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    for (i = 0; i < nr_esbs; i++) {
        if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive2_eas_is_masked(&eas)) {
            xive2_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVPT %08x .. %08x\n", chip_id, blk,
                   0, XIVE2_NVP_COUNT - 1);
    xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
    for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
        while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
            xive2_nvp_pic_print_info(&nvp, i++, mon);
        }
    }
}