/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"
#include "sysemu/qtest.h"

#include <libfdt.h>

#include "pnv_xive2_regs.h"

#undef XIVE2_DEBUG

/* XIVE Sync or Flush Notification Block */
typedef struct XiveSfnBlock {
    uint8_t bytes[32];
} XiveSfnBlock;

/* XIVE Thread Sync or Flush Notification Area */
typedef struct XiveThreadNA {
    XiveSfnBlock topo[16];
} XiveThreadNA;

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4
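
/*
 * Each ESB entry is a 2-bit PQ pair, so one byte of backing store
 * covers SBE_PER_BYTE (4) interrupt sources; a direct 64K ESB page,
 * for instance, holds the state bits of 256K sources.
 */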

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {

    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),     16 },
    [VST_ESB]  = { "ESB",  1,                    16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),     16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),     16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc),    16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc),    16 },

    [VST_IC]  =  { "IC",   1, /* ? */            16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", sizeof(XiveThreadNA), 16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * fifos of the VC sub-engine in case of overflow.
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - NxC,
     * 3 - INT,
     * 4 - OS-Queue,
     * 5 - Pool-Queue,
     * 6 - Hard-Queue
     */
    [VST_ERQ]  = { "ERQ",  1,                   VC_QUEUE_COUNT },
};

#define xive2_error(xive, fmt, ...)                                     \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__)

/*
 * The block id of a controller is normally its chip id. Firmware can
 * override it with a hardwired value when the
 * CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE bit is set in CQ_XIVE_CFG.
 */
static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];

    if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
        blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
        PnvXive2 *xive = &chip10->xive;

        if (pnv_xive2_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for ESB, EAT, ENDT, NVP
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of the
 * same size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8
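
#ifdef XIVE2_DEBUG
/*
 * Compiled-out sketch of the indirect lookup arithmetic used by
 * pnv_xive2_vst_addr_indirect() below. With 64K indirect pages
 * (page_shift = 16) and the 32-byte Xive2End entry, a page holds
 * 2048 entries: END index 5000 lands in VSD slot 2 of the indirect
 * table (5000 / 2048), at entry 904 (5000 % 2048) of the direct page.
 */
static uint32_t pnv_xive2_vst_idx_split(uint32_t page_shift,
                                        uint32_t entry_size,
                                        uint32_t idx, uint32_t *vsd_idx)
{
    uint32_t vst_per_page = (1ull << page_shift) / entry_size;

    *vsd_idx = idx / vst_per_page;  /* slot in the indirect table */
    return idx % vst_per_page;      /* entry in the direct page */
}
#endif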

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
           page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    result = address_space_read(&address_space_memory, addr,
                                MEMTXATTRS_UNSPECIFIED, data,
                                info->size);
    if (result != MEMTX_OK) {
        xive2_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        result = address_space_write(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED, data,
                                     info->size);
    } else {
        result = address_space_write(&address_space_memory,
                                     addr + word_number * 4,
                                     MEMTXATTRS_UNSPECIFIED,
                                     data + word_number * 4, 4);
    }

    if (result != MEMTX_OK) {
        xive2_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
}

static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
                              word_number);
}

static inline int pnv_xive2_get_current_pir(PnvXive2 *xive)
{
    if (!qtest_enabled()) {
        PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
        return ppc_cpu_pir(cpu);
    }
    return 0;
}

/*
 * After SW injects a Queue Sync or Cache Flush operation, HW notifies
 * SW of its completion by writing a byte of all 1's (0xff) to a
 * specific memory location. The location is calculated by first
 * looking up a base address in the SYNC VSD, using the Topology ID of
 * the originating thread as the "block" number. This points to a 64K
 * block of memory that is further divided into 128 chunks of 512
 * bytes, indexed by the thread id of the requesting thread. Finally,
 * each 512-byte chunk is divided into 16 chunks of 32 bytes, indexed
 * by the topology id of the targeted IC's chip. The values below are
 * the offsets into that 32-byte chunk for each type of cache flush or
 * queue sync operation.
 */
#define PNV_XIVE2_QUEUE_IPI              0x00
#define PNV_XIVE2_QUEUE_HW               0x01
#define PNV_XIVE2_QUEUE_NXC              0x02
#define PNV_XIVE2_QUEUE_INT              0x03
#define PNV_XIVE2_QUEUE_OS               0x04
#define PNV_XIVE2_QUEUE_POOL             0x05
#define PNV_XIVE2_QUEUE_HARD             0x06
#define PNV_XIVE2_CACHE_ENDC             0x08
#define PNV_XIVE2_CACHE_ESBC             0x09
#define PNV_XIVE2_CACHE_EASC             0x0a
#define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO   0x10
#define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO    0x11
#define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI   0x12
#define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI    0x13
#define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI   0x14
#define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI    0x15
#define PNV_XIVE2_CACHE_NXC              0x18
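
/*
 * Worked example of the layout described above: a VC_ENDC_FLUSH_INJECT
 * issued from a thread on chip 1 targeting the IC of chip 2 writes 0xff
 * at byte (2 * sizeof(XiveSfnBlock)) + PNV_XIVE2_CACHE_ENDC = 0x48 of
 * that thread's 512-byte notification chunk.
 */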

static int pnv_xive2_inject_notify(PnvXive2 *xive, int type)
{
    uint64_t addr;
    int pir = pnv_xive2_get_current_pir(xive);
    int thread_nr = PNV10_PIR2THREAD(pir);
    int thread_topo_id = PNV10_PIR2CHIP(pir);
    int ic_topo_id = xive->chip->chip_id;
    uint64_t offset = ic_topo_id * sizeof(XiveSfnBlock);
    uint8_t byte = 0xff;
    MemTxResult result;

    /* Retrieve the address of the requesting thread's notification area */
    addr = pnv_xive2_vst_addr(xive, VST_SYNC, thread_topo_id, thread_nr);

    if (!addr) {
        xive2_error(xive, "VST: no SYNC entry %x/%x !?",
                    thread_topo_id, thread_nr);
        return -1;
    }

    address_space_stb(&address_space_memory, addr + offset + type, byte,
                      MEMTXATTRS_UNSPECIFIED, &result);
    assert(result == MEMTX_OK);

    return 0;
}

static int pnv_xive2_end_update(PnvXive2 *xive, uint8_t watch_engine)
{
    uint8_t  blk;
    uint32_t idx;
    int i, spec_reg, data_reg;
    uint64_t endc_watch[4];

    assert(watch_engine < ARRAY_SIZE(endc_watch));

    spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
    idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] = cpu_to_be64(xive->vc_regs[data_reg + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                              XIVE_VST_WORD_ALL);
}
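
/*
 * The four cache watch engines are spaced 0x40 bytes apart in register
 * space, i.e. eight 64-bit registers per engine: for watch engine 2,
 * spec_reg above resolves to (VC_ENDC_WATCH0_SPEC + 0x80) >> 3, and the
 * DATA0..DATA3 registers follow at the same stride.
 */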

static void pnv_xive2_end_cache_load(PnvXive2 *xive, uint8_t watch_engine)
{
    uint8_t  blk;
    uint32_t idx;
    uint64_t endc_watch[4] = { 0 };
    int i, spec_reg, data_reg;

    assert(watch_engine < ARRAY_SIZE(endc_watch));

    spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
    idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[data_reg + i] = be64_to_cpu(endc_watch[i]);
    }
}

static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
}

static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
                              word_number);
}

static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type)
{
    switch (nxc_type) {
    case PC_NXC_WATCH_NXC_NVP:
        *table_type = VST_NVP;
        break;
    case PC_NXC_WATCH_NXC_NVG:
        *table_type = VST_NVG;
        break;
    case PC_NXC_WATCH_NXC_NVC:
        *table_type = VST_NVC;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid table type for nxc operation\n");
        return -1;
    }
    return 0;
}

static int pnv_xive2_nxc_update(PnvXive2 *xive, uint8_t watch_engine)
{
    uint8_t  blk, nxc_type;
    uint32_t idx, table_type = -1;
    int i, spec_reg, data_reg;
    uint64_t nxc_watch[4];

    assert(watch_engine < ARRAY_SIZE(nxc_watch));

    spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
    blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
    idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);

    assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] = cpu_to_be64(xive->pc_regs[data_reg + i]);
    }

    return pnv_xive2_vst_write(xive, table_type, blk, idx, nxc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive2_nxc_cache_load(PnvXive2 *xive, uint8_t watch_engine)
{
    uint8_t  blk, nxc_type;
    uint32_t idx, table_type = -1;
    uint64_t nxc_watch[4] = { 0 };
    int i, spec_reg, data_reg;

    assert(watch_engine < ARRAY_SIZE(nxc_watch));

    spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
    blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
    idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);

    assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));

    if (pnv_xive2_vst_read(xive, table_type, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NXC entry %x/%x in %s table!?",
                    blk, idx, vst_infos[table_type].name);
    }

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[data_reg + i] = be64_to_cpu(nxc_watch[i]);
    }
}

static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}

static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE2_GEN1_TIMA_OS;
    }

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
        cfg |= XIVE2_VP_SAVE_RESTORE;
    }

    if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
              xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
        cfg |= XIVE2_THREADID_8BITS;
    }

    return cfg;
}

static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
    uint32_t bit = pir & 0x3f;

    return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}

static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            if (gen1_tima_os) {
                ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                 nvt_idx, cam_ignore,
                                                 logic_serv);
            } else {
                ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                   nvt_idx, cam_ignore,
                                                   logic_serv);
            }

            /*
             * Save the context and keep scanning to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return false;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;
    }
    return cfg;
}

static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
    return pnv_xive2_block_id(PNV_XIVE2(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips. To identify the chip
 * from which the access is being done, we extract the chip id from
 * the PIR.
 */
static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive2 *xive = PNV_XIVE2(xptr);

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources of the interrupt controller have no knowledge
 * of the XIVE2 chip on which they reside. Encode the block id in the
 * source interrupt number before forwarding the source event
 * notification to the Router. This is required on a multichip system.
 */
static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive2 *xive = PNV_XIVE2(xn);
    uint8_t blk = pnv_xive2_block_id(xive);

    xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}
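
/*
 * With the EAS numbering macros of xive_regs.h, XIVE_EAS() above folds
 * the block id into the top nibble of the 32-bit source number: block
 * 2, source 0x10 becomes 0x20000010.
 */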

/*
 * Set Translation Tables
 *
 * TODO add support for multiple sets
 */
static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
{
    uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
    uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
                                  xive->cq_regs[CQ_TAR >> 3]);

    switch (tsel) {
    case CQ_TAR_NVPG:
    case CQ_TAR_ESB:
    case CQ_TAR_END:
        xive->tables[tsel][entry] = val;
        break;
    default:
        xive2_error(xive, "IC: unsupported table %d", tsel);
        return -1;
    }

    if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
        xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
                     xive->cq_regs[CQ_TAR >> 3], ++entry);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS:  /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}
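
/*
 * Sizing example for the VST_ESB case above: a direct 64K table
 * (vst_tsize = 0x10000) provisions 256K PQ pairs, so with 64K ESB
 * pages (esb_shift = 16) the ESB MMIO window is resized to
 * 0x10000 * 4 << 16 bytes, i.e. 16G.
 */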

/*
 * Both the PC and VC sub-engines are configured here, as each uses
 * the Virtual Structure Tables.
 */
static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
                           xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_ERQ) {
        xive2_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for"
                      " %s table", blk, vst_infos[type].name);
        return;
    }

    if (!vst_addr) {
        xive2_error(xive, "VST: invalid %s table address",
                   vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive2_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * MMIO handlers
 */

/*
 * IC BAR layout
 *
 * Page 0: Internal CQ register accesses (reads & writes)
 * Page 1: Internal PC register accesses (reads & writes)
 * Page 2: Internal VC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (reads & writes)
 * Page 4: Notify Port page (writes only, w/data)
 * Page 5: Reserved
 * Page 6: Sync Poll page (writes only, dataless)
 * Page 7: Sync Inject page (writes only, dataless)
 * Page 8: LSI Trigger page (writes only, dataless)
 * Page 9: LSI SB Management page (reads & writes, dataless)
 * Pages 10-255: Reserved
 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
 *                covering the 128 threads in P10.
 * Pages 384-511: Reserved
 */
typedef struct PnvXive2Region {
    const char *name;
    uint32_t pgoff;
    uint32_t pgsize;
    const MemoryRegionOps *ops;
} PnvXive2Region;

static const MemoryRegionOps pnv_xive2_ic_cq_ops;
static const MemoryRegionOps pnv_xive2_ic_pc_ops;
static const MemoryRegionOps pnv_xive2_ic_vc_ops;
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
static const MemoryRegionOps pnv_xive2_ic_notify_ops;
static const MemoryRegionOps pnv_xive2_ic_sync_ops;
static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;

/* 512 pages. 4K: 2M range, 64K: 32M range */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",        0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",        1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",        2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",     3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",    4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",      6,   2,   &pnv_xive2_ic_sync_ops   },
    { "xive-ic-lsi",       8,   2,   &pnv_xive2_ic_lsi_ops    },
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops  },
    /* pages 384-511 reserved */
};
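
/*
 * With 4K IC pages, for instance, the "xive-ic-tm-indirect" region
 * above starts at offset 256 * 0x1000 = 0x100000 in the IC BAR and
 * spans 128 pages; with 64K pages both values scale by 16.
 */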

/*
 * CQ operations
 */

static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
                                        unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint64_t val = 0;

    switch (offset) {
    case CQ_XIVE_CAP: /* Set at reset */
    case CQ_XIVE_CFG:
        val = xive->cq_regs[reg];
        break;
    case CQ_MSGSND: /* TODO check the #cores of the machine */
        val = 0xffffffff00000000;
        break;
    case CQ_CFG_PB_GEN:
        val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
        break;
    default:
        xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
}
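
/*
 * CQ_BAR_RANGE encodes the BAR size as a power of two starting at
 * 16M: a range field of 0 selects a 16M window, 4 selects 256M.
 */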

static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->cq_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint8_t pnv_xive2_cache_watch_assign(uint64_t engine_mask,
                                            uint64_t *state)
{
    uint8_t val = 0xFF;
    int i;

    for (i = 3; i >= 0; i--) {
        if (BIT(i) & engine_mask) {
            if (!(BIT(i) & *state)) {
                *state |= BIT(i);
                val = 3 - i;
                break;
            }
        }
    }
    return val;
}
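
/*
 * Note the reversed mapping between engine IDs and state bits above:
 * engine 0 is tracked by BIT(3). With engine_mask = 0xF and
 * *state = 0b1000 (engine 0 busy), the function grabs BIT(2) and
 * returns engine 1; once all four bits are set, it returns 0xFF.
 */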
1105 
1106 static void pnv_xive2_cache_watch_release(uint64_t *state, uint8_t watch_engine)
1107 {
1108     uint8_t engine_bit = 3 - watch_engine;
1109 
1110     if (*state & BIT(engine_bit)) {
1111         *state &= ~BIT(engine_bit);
1112     }
1113 }
1114 
1115 static uint8_t pnv_xive2_endc_cache_watch_assign(PnvXive2 *xive)
1116 {
1117     uint64_t engine_mask = GETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN,
1118                                     xive->vc_regs[VC_ENDC_CFG >> 3]);
1119     uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
1120     uint8_t val;
1121 
1122     /*
1123      * We keep track of which engines are currently busy in the
1124      * VC_ENDC_WATCH_ASSIGN register directly. When the firmware reads
1125      * the register, we don't return its value but the ID of an engine
1126      * it can use.
1127      * There are 4 engines. 0xFF means no engine is available.
1128      */
1129     val = pnv_xive2_cache_watch_assign(engine_mask, &state);
1130     if (val != 0xFF) {
1131         xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
1132     }
1133     return val;
1134 }
1135 
1136 static void pnv_xive2_endc_cache_watch_release(PnvXive2 *xive,
1137                                                uint8_t watch_engine)
1138 {
1139     uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
1140 
1141     pnv_xive2_cache_watch_release(&state, watch_engine);
1142     xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
1143 }
1144 
1145 static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
1146                                      unsigned size)
1147 {
1148     PnvXive2 *xive = PNV_XIVE2(opaque);
1149     uint64_t val = 0;
1150     uint32_t reg = offset >> 3;
1151     uint8_t watch_engine;
1152 
1153     switch (offset) {
1154     /*
1155      * VSD table settings.
1156      */
1157     case VC_VSD_TABLE_ADDR:
1158     case VC_VSD_TABLE_DATA:
1159         val = xive->vc_regs[reg];
1160         break;
1161 
1162     /*
1163      * ESB cache updates (not modeled)
1164      */
1165     case VC_ESBC_FLUSH_CTRL:
1166         xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
1167         val = xive->vc_regs[reg];
1168         break;
1169 
1170     case VC_ESBC_CFG:
1171         val = xive->vc_regs[reg];
1172         break;
1173 
1174     /*
1175      * EAS cache updates (not modeled)
1176      */
1177     case VC_EASC_FLUSH_CTRL:
1178         xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
1179         val = xive->vc_regs[reg];
1180         break;
1181 
1182     case VC_ENDC_WATCH_ASSIGN:
1183         val = pnv_xive2_endc_cache_watch_assign(xive);
1184         break;
1185 
1186     case VC_ENDC_CFG:
1187         val = xive->vc_regs[reg];
1188         break;
1189 
1190     /*
1191      * END cache updates
1192      */
1193     case VC_ENDC_WATCH0_SPEC:
1194     case VC_ENDC_WATCH1_SPEC:
1195     case VC_ENDC_WATCH2_SPEC:
1196     case VC_ENDC_WATCH3_SPEC:
1197         watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6;
1198         xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
1199         pnv_xive2_endc_cache_watch_release(xive, watch_engine);
1200         val = xive->vc_regs[reg];
1201         break;
1202 
1203     case VC_ENDC_WATCH0_DATA0:
1204     case VC_ENDC_WATCH1_DATA0:
1205     case VC_ENDC_WATCH2_DATA0:
1206     case VC_ENDC_WATCH3_DATA0:
1207         /*
1208          * Load DATA registers from cache with data requested by the
1209          * SPEC register
1210          */
1211         watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
1212         pnv_xive2_end_cache_load(xive, watch_engine);
1213         val = xive->vc_regs[reg];
1214         break;
1215 
1216     case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
1217     case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
1218     case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
1219     case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
1220         val = xive->vc_regs[reg];
1221         break;
1222 
1223     case VC_ENDC_FLUSH_CTRL:
1224         xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
1225         val = xive->vc_regs[reg];
1226         break;
1227 
1228     /*
1229      * Indirect invalidation
1230      */
1231     case VC_AT_MACRO_KILL_MASK:
1232         val = xive->vc_regs[reg];
1233         break;
1234 
1235     case VC_AT_MACRO_KILL:
1236         xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
1237         val = xive->vc_regs[reg];
1238         break;
1239 
1240     /*
1241      * Interrupt fifo overflow in memory backing store (Not modeled)
1242      */
1243     case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
1244         val = xive->vc_regs[reg];
1245         break;
1246 
1247     /*
1248      * Synchronisation
1249      */
1250     case VC_ENDC_SYNC_DONE:
1251         val = VC_ENDC_SYNC_POLL_DONE;
1252         break;
1253     default:
1254         xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
1255     }
1256 
1257     return val;
1258 }
1259 
1260 static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
1261                                   uint64_t val, unsigned size)
1262 {
1263     PnvXive2 *xive = PNV_XIVE2(opaque);
1264     uint32_t reg = offset >> 3;
1265     uint8_t watch_engine;
1266 
1267     switch (offset) {
1268     /*
1269      * VSD table settings.
1270      */
1271     case VC_VSD_TABLE_ADDR:
1272        break;
1273     case VC_VSD_TABLE_DATA:
1274         pnv_xive2_vst_set_data(xive, val);
1275         break;
1276 
1277     /*
1278      * ESB cache updates (not modeled)
1279      */
1280     /* case VC_ESBC_FLUSH_CTRL: */
1281     case VC_ESBC_FLUSH_POLL:
1282         xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
1283         /* ESB update */
1284         break;
1285 
1286     case VC_ESBC_FLUSH_INJECT:
1287         pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ESBC);
1288         break;
1289 
1290     case VC_ESBC_CFG:
1291         break;
1292 
1293     /*
1294      * EAS cache updates (not modeled)
1295      */
1296     /* case VC_EASC_FLUSH_CTRL: */
1297     case VC_EASC_FLUSH_POLL:
1298         xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
1299         /* EAS update */
1300         break;
1301 
1302     case VC_EASC_FLUSH_INJECT:
1303         pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_EASC);
1304         break;
1305 
1306     case VC_ENDC_CFG:
1307         break;
1308 
1309     /*
1310      * END cache updates
1311      */
1312     case VC_ENDC_WATCH0_SPEC:
1313     case VC_ENDC_WATCH1_SPEC:
1314     case VC_ENDC_WATCH2_SPEC:
1315     case VC_ENDC_WATCH3_SPEC:
1316          val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
1317         break;
1318 
1319     case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
1320     case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
1321     case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
1322     case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
1323         break;
1324     case VC_ENDC_WATCH0_DATA0:
1325     case VC_ENDC_WATCH1_DATA0:
1326     case VC_ENDC_WATCH2_DATA0:
1327     case VC_ENDC_WATCH3_DATA0:
1328         /* writing to DATA0 triggers the cache write */
1329         watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
1330         xive->vc_regs[reg] = val;
1331         pnv_xive2_end_update(xive, watch_engine);
1332         break;
1333 
1334 
1335     /* case VC_ENDC_FLUSH_CTRL: */
1336     case VC_ENDC_FLUSH_POLL:
1337         xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
1338         break;
1339 
1340     case VC_ENDC_FLUSH_INJECT:
1341         pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ENDC);
1342         break;
1343 
1344     /*
1345      * Indirect invalidation
1346      */
1347     case VC_AT_MACRO_KILL:
1348     case VC_AT_MACRO_KILL_MASK:
1349         break;
1350 
1351     /*
1352      * Interrupt fifo overflow in memory backing store (Not modeled)
1353      */
1354     case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
1355         break;
1356 
1357     /*
1358      * Synchronisation
1359      */
1360     case VC_ENDC_SYNC_DONE:
1361         break;
1362 
1363     default:
1364         xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
1365         return;
1366     }
1367 
1368     xive->vc_regs[reg] = val;
1369 }
1370 
1371 static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
1372     .read = pnv_xive2_ic_vc_read,
1373     .write = pnv_xive2_ic_vc_write,
1374     .endianness = DEVICE_BIG_ENDIAN,
1375     .valid = {
1376         .min_access_size = 8,
1377         .max_access_size = 8,
1378     },
1379     .impl = {
1380         .min_access_size = 8,
1381         .max_access_size = 8,
1382     },
1383 };
1384 
1385 static uint8_t pnv_xive2_nxc_cache_watch_assign(PnvXive2 *xive)
1386 {
1387     uint64_t engine_mask = GETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN,
1388                                     xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
1389     uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
1390     uint8_t val;
1391 
1392     /*
1393      * We keep track of which engines are currently busy in the
1394      * PC_NXC_WATCH_ASSIGN register directly. When the firmware reads
1395      * the register, we don't return its value but the ID of an engine
1396      * it can use.
1397      * There are 4 engines. 0xFF means no engine is available.
1398      */
1399     val = pnv_xive2_cache_watch_assign(engine_mask, &state);
1400     if (val != 0xFF) {
1401         xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
1402     }
1403     return val;
1404 }
1405 
1406 static void pnv_xive2_nxc_cache_watch_release(PnvXive2 *xive,
1407                                               uint8_t watch_engine)
1408 {
1409     uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
1410 
1411     pnv_xive2_cache_watch_release(&state, watch_engine);
1412     xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
1413 }
1414 
1415 static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
1416                                      unsigned size)
1417 {
1418     PnvXive2 *xive = PNV_XIVE2(opaque);
1419     uint64_t val = -1;
1420     uint32_t reg = offset >> 3;
1421     uint8_t watch_engine;
1422 
1423     switch (offset) {
1424     /*
1425      * VSD table settings.
1426      */
1427     case PC_VSD_TABLE_ADDR:
1428     case PC_VSD_TABLE_DATA:
1429         val = xive->pc_regs[reg];
1430         break;
1431 
1432     case PC_NXC_WATCH_ASSIGN:
1433         val = pnv_xive2_nxc_cache_watch_assign(xive);
1434         break;
1435 
1436     case PC_NXC_PROC_CONFIG:
1437         val = xive->pc_regs[reg];
1438         break;
1439 
1440     /*
1441      * cache updates
1442      */
1443     case PC_NXC_WATCH0_SPEC:
1444     case PC_NXC_WATCH1_SPEC:
1445     case PC_NXC_WATCH2_SPEC:
1446     case PC_NXC_WATCH3_SPEC:
1447         watch_engine = (offset - PC_NXC_WATCH0_SPEC) >> 6;
1448         xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
1449         pnv_xive2_nxc_cache_watch_release(xive, watch_engine);
1450         val = xive->pc_regs[reg];
1451         break;
1452 
1453     case PC_NXC_WATCH0_DATA0:
1454     case PC_NXC_WATCH1_DATA0:
1455     case PC_NXC_WATCH2_DATA0:
1456     case PC_NXC_WATCH3_DATA0:
1457        /*
1458         * Load DATA registers from cache with data requested by the
1459         * SPEC register
1460         */
1461         watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
1462         pnv_xive2_nxc_cache_load(xive, watch_engine);
1463         val = xive->pc_regs[reg];
1464         break;
1465 
1466     case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
1467     case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
1468     case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
1469     case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
1470         val = xive->pc_regs[reg];
1471         break;
1472 
1473     case PC_NXC_FLUSH_CTRL:
1474         xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
1475         val = xive->pc_regs[reg];
1476         break;
1477 
1478     /*
1479      * Indirect invalidation
1480      */
1481     case PC_AT_KILL:
1482         xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
1483         val = xive->pc_regs[reg];
1484         break;
1485 
1486     default:
1487         xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
1488     }
1489 
1490     return val;
1491 }
1492 
1493 static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
1494                                   uint64_t val, unsigned size)
1495 {
1496     PnvXive2 *xive = PNV_XIVE2(opaque);
1497     uint32_t reg = offset >> 3;
1498     uint8_t watch_engine;
1499 
1500     switch (offset) {
1501 
1502     /*
1503      * VSD table settings. Only taken into account in the VC
1504      * sub-engine because the Xive2Router model combines both VC and PC
1505      * sub-engines
1506      */
1507     case PC_VSD_TABLE_ADDR:
1508     case PC_VSD_TABLE_DATA:
1509         break;
1510 
1511     case PC_NXC_PROC_CONFIG:
1512         break;
1513 
1514     /*
1515      * cache updates
1516      */
1517     case PC_NXC_WATCH0_SPEC:
1518     case PC_NXC_WATCH1_SPEC:
1519     case PC_NXC_WATCH2_SPEC:
1520     case PC_NXC_WATCH3_SPEC:
1521         val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
1522         break;
1523 
1524     case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
1525     case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
1526     case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
1527     case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
1528         break;
1529     case PC_NXC_WATCH0_DATA0:
1530     case PC_NXC_WATCH1_DATA0:
1531     case PC_NXC_WATCH2_DATA0:
1532     case PC_NXC_WATCH3_DATA0:
1533         /* writing to DATA0 triggers the cache write */
1534         watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
1535         xive->pc_regs[reg] = val;
1536         pnv_xive2_nxc_update(xive, watch_engine);
1537         break;
1538 
1539    /* case PC_NXC_FLUSH_CTRL: */
1540     case PC_NXC_FLUSH_POLL:
1541         xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
1542         break;
1543 
1544     case PC_NXC_FLUSH_INJECT:
1545         pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_NXC);
1546         break;
1547 
1548     /*
1549      * Indirect invalidation
1550      */
1551     case PC_AT_KILL:
1552     case PC_AT_KILL_MASK:
1553         break;
1554 
1555     default:
1556         xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
1557         return;
1558     }
1559 
1560     xive->pc_regs[reg] = val;
1561 }
1562 
1563 static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
1564     .read = pnv_xive2_ic_pc_read,
1565     .write = pnv_xive2_ic_pc_write,
1566     .endianness = DEVICE_BIG_ENDIAN,
1567     .valid = {
1568         .min_access_size = 8,
1569         .max_access_size = 8,
1570     },
1571     .impl = {
1572         .min_access_size = 8,
1573         .max_access_size = 8,
1574     },
1575 };
1576 
1577 
1578 static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
1579                                         unsigned size)
1580 {
1581     PnvXive2 *xive = PNV_XIVE2(opaque);
1582     uint64_t val = -1;
1583     uint32_t reg = offset >> 3;
1584 
1585     switch (offset) {
1586     /*
1587      * XIVE2 hardware thread enablement
1588      */
1589     case TCTXT_EN0:
1590     case TCTXT_EN1:
1591         val = xive->tctxt_regs[reg];
1592         break;
1593 
1594     case TCTXT_EN0_SET:
1595     case TCTXT_EN0_RESET:
1596         val = xive->tctxt_regs[TCTXT_EN0 >> 3];
1597         break;
1598     case TCTXT_EN1_SET:
1599     case TCTXT_EN1_RESET:
1600         val = xive->tctxt_regs[TCTXT_EN1 >> 3];
1601         break;
1602     case TCTXT_CFG:
1603         val = xive->tctxt_regs[reg];
1604         break;
1605     default:
1606         xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
1607     }
1608 
1609     return val;
1610 }
1611 
1612 static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
1613                                      uint64_t val, unsigned size)
1614 {
1615     PnvXive2 *xive = PNV_XIVE2(opaque);
1616     uint32_t reg = offset >> 3;
1617 
1618     switch (offset) {
1619     /*
1620      * XIVE2 hardware thread enablement
1621      */
1622     case TCTXT_EN0: /* Physical Thread Enable */
1623     case TCTXT_EN1: /* Physical Thread Enable (fused core) */
1624         xive->tctxt_regs[reg] = val;
1625         break;
1626 
1627     case TCTXT_EN0_SET:
1628         xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
1629         break;
1630     case TCTXT_EN1_SET:
1631         xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
1632         break;
1633     case TCTXT_EN0_RESET:
1634         xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
1635         break;
1636     case TCTXT_EN1_RESET:
1637         xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
1638         break;
1639     case TCTXT_CFG:
1640         xive->tctxt_regs[reg] = val;
1641         break;
1642     default:
1643         xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
1644         return;
1645     }
1646 }
1647 
1648 static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
1649     .read = pnv_xive2_ic_tctxt_read,
1650     .write = pnv_xive2_ic_tctxt_write,
1651     .endianness = DEVICE_BIG_ENDIAN,
1652     .valid = {
1653         .min_access_size = 8,
1654         .max_access_size = 8,
1655     },
1656     .impl = {
1657         .min_access_size = 8,
1658         .max_access_size = 8,
1659     },
1660 };
1661 
1662 /*
1663  * Redirect XSCOM to MMIO handlers
1664  */
1665 static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
1666                                      unsigned size)
1667 {
1668     PnvXive2 *xive = PNV_XIVE2(opaque);
1669     uint64_t val = -1;
1670     uint32_t xscom_reg = offset >> 3;
1671     uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
1672 
1673     switch (xscom_reg) {
1674     case 0x000 ... 0x0FF:
1675         val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
1676         break;
1677     case 0x100 ... 0x1FF:
1678         val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
1679         break;
1680     case 0x200 ... 0x2FF:
1681         val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
1682         break;
1683     case 0x300 ... 0x3FF:
1684         val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
1685         break;
1686     default:
1687         xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
1688     }
1689 
1690     return val;
1691 }
1692 
1693 static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
1694                                   uint64_t val, unsigned size)
1695 {
1696     PnvXive2 *xive = PNV_XIVE2(opaque);
1697     uint32_t xscom_reg = offset >> 3;
1698     uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
1699 
1700     switch (xscom_reg) {
1701     case 0x000 ... 0x0FF:
1702         pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
1703         break;
1704     case 0x100 ... 0x1FF:
1705         pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
1706         break;
1707     case 0x200 ... 0x2FF:
1708         pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
1709         break;
1710     case 0x300 ... 0x3FF:
1711         pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
1712         break;
1713     default:
1714         xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
1715     }
1716 }
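
/*
 * Worked example of the redirection: an XSCOM access at offset 0x8D0
 * addresses xscom_reg 0x11A (0x8D0 >> 3), which falls in the
 * 0x100 - 0x1FF VC range; its low byte 0x1A is scaled back to the MMIO
 * offset 0xD0 (0x1A << 3) and served by the VC handlers.
 */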
1717 
1718 static const MemoryRegionOps pnv_xive2_xscom_ops = {
1719     .read = pnv_xive2_xscom_read,
1720     .write = pnv_xive2_xscom_write,
1721     .endianness = DEVICE_BIG_ENDIAN,
1722     .valid = {
1723         .min_access_size = 8,
1724         .max_access_size = 8,
1725     },
1726     .impl = {
1727         .min_access_size = 8,
1728         .max_access_size = 8,
1729     },
1730 };
1731 
1732 /*
1733  * Notify port page. The layout is compatible between 4K and 64K pages:
1734  *
1735  * Page 1           Notify page (writes only)
1736  *  0x000 - 0x7FF   IPI interrupt (NPU)
1737  *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
1738  */
1739 
1740 static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
1741                                     uint64_t val)
1742 {
1743     uint8_t blk;
1744     uint32_t idx;
1745 
1746     if (val & XIVE_TRIGGER_END) {
1747         xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
1748                    addr, val);
1749         return;
1750     }
1751 
1752     /*
1753      * Forward the source event notification directly to the Router.
1754      * The source interrupt number should already be correctly encoded
1755      * with the chip block id by the sending device (PHB, PSI).
1756      */
1757     blk = XIVE_EAS_BLOCK(val);
1758     idx = XIVE_EAS_INDEX(val);
1759 
1760     xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
1761                          !!(val & XIVE_TRIGGER_PQ));
1762 }
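
/*
 * Sketch of the trigger data a source device would write, assuming the
 * XIVE_EAS()/XIVE_TRIGGER_* encodings of the XIVE headers, for source
 * 0x20 of block 1 with the PQ bits already checked by the source:
 *
 *     uint64_t data = XIVE_TRIGGER_PQ | XIVE_EAS(0x1, 0x20);
 *
 * A store of 'data' to the notify page ends up in
 * pnv_xive2_ic_hw_trigger() above.
 */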
1763 
1764 static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
1765                                       uint64_t val, unsigned size)
1766 {
1767     PnvXive2 *xive = PNV_XIVE2(opaque);
1768 
1769     /* VC: IPI triggers */
1770     switch (offset) {
1771     case 0x000 ... 0x7FF:
1772         /* TODO: check IPI notify sub-page routing */
1773         pnv_xive2_ic_hw_trigger(xive, offset, val);
1774         break;
1775 
1776     /* VC: HW triggers */
1777     case 0x800 ... 0xFFF:
1778         pnv_xive2_ic_hw_trigger(xive, offset, val);
1779         break;
1780 
1781     default:
1782         xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
1783     }
1784 }
1785 
1786 static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
1787                                          unsigned size)
1788 {
1789     PnvXive2 *xive = PNV_XIVE2(opaque);
1790 
1791     /* loads are invalid */
1792     xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
1793     return -1;
1794 }
1795 
1796 static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
1797     .read = pnv_xive2_ic_notify_read,
1798     .write = pnv_xive2_ic_notify_write,
1799     .endianness = DEVICE_BIG_ENDIAN,
1800     .valid = {
1801         .min_access_size = 8,
1802         .max_access_size = 8,
1803     },
1804     .impl = {
1805         .min_access_size = 8,
1806         .max_access_size = 8,
1807     },
1808 };
1809 
1810 static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
1811                                       unsigned size)
1812 {
1813     PnvXive2 *xive = PNV_XIVE2(opaque);
1814 
1815     xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
1816     return -1;
1817 }
1818 
1819 static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
1820                                    uint64_t val, unsigned size)
1821 {
1822     PnvXive2 *xive = PNV_XIVE2(opaque);
1823 
1824     xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
1825 }
1826 
1827 static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
1828     .read = pnv_xive2_ic_lsi_read,
1829     .write = pnv_xive2_ic_lsi_write,
1830     .endianness = DEVICE_BIG_ENDIAN,
1831     .valid = {
1832         .min_access_size = 8,
1833         .max_access_size = 8,
1834     },
1835     .impl = {
1836         .min_access_size = 8,
1837         .max_access_size = 8,
1838     },
1839 };
1840 
1841 /*
1842  * Sync MMIO page (write only)
1843  */
1844 #define PNV_XIVE2_SYNC_IPI              0x000
1845 #define PNV_XIVE2_SYNC_HW               0x080
1846 #define PNV_XIVE2_SYNC_NxC              0x100
1847 #define PNV_XIVE2_SYNC_INT              0x180
1848 #define PNV_XIVE2_SYNC_OS_ESC           0x200
1849 #define PNV_XIVE2_SYNC_POOL_ESC         0x280
1850 #define PNV_XIVE2_SYNC_HARD_ESC         0x300
1851 #define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO   0x800
1852 #define PNV_XIVE2_SYNC_NXC_LD_LCL_CO    0x880
1853 #define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI   0x900
1854 #define PNV_XIVE2_SYNC_NXC_ST_LCL_CI    0x980
1855 #define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI   0xA00
1856 #define PNV_XIVE2_SYNC_NXC_ST_RMT_CI    0xA80
1857 
1858 static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
1859                                        unsigned size)
1860 {
1861     PnvXive2 *xive = PNV_XIVE2(opaque);
1862 
1863     /* loads are invalid */
1864     xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
1865     return -1;
1866 }
1867 
1868 /*
1869  * The sync MMIO space spans two pages.  The lower page is used for
1870  * queue sync "poll" requests while the upper page is used for queue
1871  * sync "inject" requests.  Inject requests require the HW to write
1872  * a byte of all 1's to a predetermined location in memory in order
1873  * to signal completion of the request.  Both pages have the same
1874  * layout, so it is easiest to handle both with a single function.
1875  */
1876 static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
1877                                     uint64_t val, unsigned size)
1878 {
1879     PnvXive2 *xive = PNV_XIVE2(opaque);
1880     int inject_type;
1881     hwaddr pg_offset_mask = (1ull << xive->ic_shift) - 1;
1882 
1883     /* adjust offset for inject page */
1884     hwaddr adj_offset = offset & pg_offset_mask;
1885 
1886     switch (adj_offset) {
1887     case PNV_XIVE2_SYNC_IPI:
1888         inject_type = PNV_XIVE2_QUEUE_IPI;
1889         break;
1890     case PNV_XIVE2_SYNC_HW:
1891         inject_type = PNV_XIVE2_QUEUE_HW;
1892         break;
1893     case PNV_XIVE2_SYNC_NxC:
1894         inject_type = PNV_XIVE2_QUEUE_NXC;
1895         break;
1896     case PNV_XIVE2_SYNC_INT:
1897         inject_type = PNV_XIVE2_QUEUE_INT;
1898         break;
1899     case PNV_XIVE2_SYNC_OS_ESC:
1900         inject_type = PNV_XIVE2_QUEUE_OS;
1901         break;
1902     case PNV_XIVE2_SYNC_POOL_ESC:
1903         inject_type = PNV_XIVE2_QUEUE_POOL;
1904         break;
1905     case PNV_XIVE2_SYNC_HARD_ESC:
1906         inject_type = PNV_XIVE2_QUEUE_HARD;
1907         break;
1908     case PNV_XIVE2_SYNC_NXC_LD_LCL_NCO:
1909         inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO;
1910         break;
1911     case PNV_XIVE2_SYNC_NXC_LD_LCL_CO:
1912         inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_CO;
1913         break;
1914     case PNV_XIVE2_SYNC_NXC_ST_LCL_NCI:
1915         inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI;
1916         break;
1917     case PNV_XIVE2_SYNC_NXC_ST_LCL_CI:
1918         inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_CI;
1919         break;
1920     case PNV_XIVE2_SYNC_NXC_ST_RMT_NCI:
1921         inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI;
1922         break;
1923     case PNV_XIVE2_SYNC_NXC_ST_RMT_CI:
1924         inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI;
1925         break;
1926     default:
1927         xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
1928         return;
1929     }
1930 
1931     /* Write Queue Sync notification byte if writing to sync inject page */
1932     if ((offset & ~pg_offset_mask) != 0) {
1933         pnv_xive2_inject_notify(xive, inject_type);
1934     }
1935 }
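
/*
 * For example, with 64K pages (ic_shift = 16), a store at offset 0x000
 * (PNV_XIVE2_SYNC_IPI) lands in the lower "poll" page and only decodes
 * the queue type, while the same store at offset 0x10000 lands in the
 * inject page and additionally posts the all-ones completion byte
 * through pnv_xive2_inject_notify().
 */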
1936 
1937 static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
1938     .read = pnv_xive2_ic_sync_read,
1939     .write = pnv_xive2_ic_sync_write,
1940     .endianness = DEVICE_BIG_ENDIAN,
1941     .valid = {
1942         .min_access_size = 8,
1943         .max_access_size = 8,
1944     },
1945     .impl = {
1946         .min_access_size = 8,
1947         .max_access_size = 8,
1948     },
1949 };
1950 
1951 /*
1952  * When the TM direct pages of the IC controller are accessed, the
1953  * target HW thread is deduced from the page offset.
1954  */
1955 static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset)
1956 {
1957     /* On P10, the node ID shift in the PIR register is 8 bits */
1958     return xive->chip->chip_id << 8 | offset >> xive->ic_shift;
1959 }
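
/*
 * Worked example: with 64K pages (ic_shift = 16) on chip 1, an access
 * at offset 0x30000 selects HW thread page 3 and yields
 * PIR = (1 << 8) | 3 = 0x103.
 */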
1960 
1961 static uint32_t pnv_xive2_ic_tm_get_hw_page_offset(PnvXive2 *xive,
1962                                                    hwaddr offset)
1963 {
1964     /*
1965      * Indirect TIMA accesses are similar to direct accesses for
1966      * privilege ring 0. So remove any traces of the hw thread ID from
1967      * the offset in the IC BAR as it could be interpreted as the ring
1968      * privilege when calling the underlying direct access functions.
1969      */
1970     return offset & ((1ull << xive->ic_shift) - 1);
1971 }
1972 
1973 static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
1974 {
1975     PnvChip *chip = xive->chip;
1976     PowerPCCPU *cpu = NULL;
1977 
1978     cpu = pnv_chip_find_cpu(chip, pir);
1979     if (!cpu) {
1980         xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
1981         return NULL;
1982     }
1983 
1984     if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
1985         xive2_error(xive, "IC: CPU %x is not enabled", pir);
1986     }
1987 
1988     return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1989 }
1990 
1991 static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
1992                                               unsigned size)
1993 {
1994     PnvXive2 *xive = PNV_XIVE2(opaque);
1995     XivePresenter *xptr = XIVE_PRESENTER(xive);
1996     hwaddr hw_page_offset;
1997     uint32_t pir;
1998     XiveTCTX *tctx;
1999     uint64_t val = -1;
2000 
2001     pir = pnv_xive2_ic_tm_get_pir(xive, offset);
2002     hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
2003     tctx = pnv_xive2_get_indirect_tctx(xive, pir);
2004     if (tctx) {
2005         val = xive_tctx_tm_read(xptr, tctx, hw_page_offset, size);
2006     }
2007 
2008     return val;
2009 }
2010 
2011 static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
2012                                            uint64_t val, unsigned size)
2013 {
2014     PnvXive2 *xive = PNV_XIVE2(opaque);
2015     XivePresenter *xptr = XIVE_PRESENTER(xive);
2016     hwaddr hw_page_offset;
2017     uint32_t pir;
2018     XiveTCTX *tctx;
2019 
2020     pir = pnv_xive2_ic_tm_get_pir(xive, offset);
2021     hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
2022     tctx = pnv_xive2_get_indirect_tctx(xive, pir);
2023     if (tctx) {
2024         xive_tctx_tm_write(xptr, tctx, hw_page_offset, val, size);
2025     }
2026 }
2027 
2028 static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
2029     .read = pnv_xive2_ic_tm_indirect_read,
2030     .write = pnv_xive2_ic_tm_indirect_write,
2031     .endianness = DEVICE_BIG_ENDIAN,
2032     .valid = {
2033         .min_access_size = 1,
2034         .max_access_size = 8,
2035     },
2036     .impl = {
2037         .min_access_size = 1,
2038         .max_access_size = 8,
2039     },
2040 };
2041 
2042 /*
2043  * TIMA ops
2044  */
2045 static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
2046                                uint64_t value, unsigned size)
2047 {
2048     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
2049     PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
2050     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2051     XivePresenter *xptr = XIVE_PRESENTER(xive);
2052 
2053     xive_tctx_tm_write(xptr, tctx, offset, value, size);
2054 }
2055 
2056 static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
2057 {
2058     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
2059     PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
2060     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2061     XivePresenter *xptr = XIVE_PRESENTER(xive);
2062 
2063     return xive_tctx_tm_read(xptr, tctx, offset, size);
2064 }
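
/*
 * Unlike the indirect window above, the direct TIMA resolves the thread
 * context from current_cpu, i.e. from the vCPU performing the access,
 * so no PIR decoding of the offset is needed.
 */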
2065 
2066 static const MemoryRegionOps pnv_xive2_tm_ops = {
2067     .read = pnv_xive2_tm_read,
2068     .write = pnv_xive2_tm_write,
2069     .endianness = DEVICE_BIG_ENDIAN,
2070     .valid = {
2071         .min_access_size = 1,
2072         .max_access_size = 8,
2073     },
2074     .impl = {
2075         .min_access_size = 1,
2076         .max_access_size = 8,
2077     },
2078 };
2079 
2080 static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
2081                                    unsigned size)
2082 {
2083     PnvXive2 *xive = PNV_XIVE2(opaque);
2084 
2085     xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
2086     return -1;
2087 }
2088 
2089 static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
2090                                 uint64_t val, unsigned size)
2091 {
2092     PnvXive2 *xive = PNV_XIVE2(opaque);
2093 
2094     xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
2095 }
2096 
2097 static const MemoryRegionOps pnv_xive2_nvc_ops = {
2098     .read = pnv_xive2_nvc_read,
2099     .write = pnv_xive2_nvc_write,
2100     .endianness = DEVICE_BIG_ENDIAN,
2101     .valid = {
2102         .min_access_size = 8,
2103         .max_access_size = 8,
2104     },
2105     .impl = {
2106         .min_access_size = 8,
2107         .max_access_size = 8,
2108     },
2109 };
2110 
2111 static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
2112                                     unsigned size)
2113 {
2114     PnvXive2 *xive = PNV_XIVE2(opaque);
2115 
2116     xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
2117     return -1;
2118 }
2119 
2120 static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
2121                                  uint64_t val, unsigned size)
2122 {
2123     PnvXive2 *xive = PNV_XIVE2(opaque);
2124 
2125     xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
2126 }
2127 
2128 static const MemoryRegionOps pnv_xive2_nvpg_ops = {
2129     .read = pnv_xive2_nvpg_read,
2130     .write = pnv_xive2_nvpg_write,
2131     .endianness = DEVICE_BIG_ENDIAN,
2132     .valid = {
2133         .min_access_size = 8,
2134         .max_access_size = 8,
2135     },
2136     .impl = {
2137         .min_access_size = 8,
2138         .max_access_size = 8,
2139     },
2140 };
2141 
2142 /*
2143  * POWER10 default capabilities: 0x2000120076f000FC
2144  */
2145 #define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC
2146 
2147 /*
2148  * POWER10 default configuration: 0x0030000033000000
2149  *
2150  * The 8-bit thread id was dropped for P10
2151  */
2152 #define PNV_XIVE2_CONFIGURATION 0x0030000033000000
2153 
2154 static void pnv_xive2_reset(void *dev)
2155 {
2156     PnvXive2 *xive = PNV_XIVE2(dev);
2157     XiveSource *xsrc = &xive->ipi_source;
2158     Xive2EndSource *end_xsrc = &xive->end_source;
2159 
2160     xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
2161     xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;
2162 
2163     /* HW hardwires the chip's topology number in the block field */
2164     xive->cq_regs[CQ_XIVE_CFG >> 3] |=
2165         SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);
2166 
2167     /* VC and PC cache watch assign mechanism */
2168     xive->vc_regs[VC_ENDC_CFG >> 3] =
2169         SETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN, 0ull, 0b0111);
2170     xive->pc_regs[PC_NXC_PROC_CONFIG >> 3] =
2171         SETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN, 0ull, 0b0111);
2172 
2173     /* Set default page size to 64k */
2174     xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
2175     xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;
2176 
2177     /* Clear source MMIOs */
2178     if (memory_region_is_mapped(&xsrc->esb_mmio)) {
2179         memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
2180     }
2181 
2182     if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
2183         memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
2184     }
2185 }
2186 
2187 /*
2188  * Maximum number of IRQs and ENDs supported by HW. Will be tuned by
2189  * software.
2190  */
2191 #define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
2192 #define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
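
/*
 * Each source owns a pair of 64K ESB pages, i.e.
 * 1ull << XIVE_ESB_64K_2PAGE = 128 KiB of MMIO, so the maxima above are
 * the ESB/END BAR sizes divided by 128 KiB.
 */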
2193 
2194 static void pnv_xive2_realize(DeviceState *dev, Error **errp)
2195 {
2196     PnvXive2 *xive = PNV_XIVE2(dev);
2197     PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
2198     XiveSource *xsrc = &xive->ipi_source;
2199     Xive2EndSource *end_xsrc = &xive->end_source;
2200     Error *local_err = NULL;
2201     int i;
2202 
2203     pxc->parent_realize(dev, &local_err);
2204     if (local_err) {
2205         error_propagate(errp, local_err);
2206         return;
2207     }
2208 
2209     assert(xive->chip);
2210 
2211     /*
2212      * The XiveSource and Xive2EndSource objects are realized with the
2213      * maximum allowed HW configuration. The ESB MMIO regions will be
2214      * resized dynamically when the controller is configured by the FW
2215      * to limit accesses to resources not provisioned.
2216      */
2217     object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
2218                             &error_fatal);
2219     object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
2220                             &error_fatal);
2221     object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
2222                              &error_fatal);
2223     qdev_realize(DEVICE(xsrc), NULL, &local_err);
2224     if (local_err) {
2225         error_propagate(errp, local_err);
2226         return;
2227     }
2228 
2229     object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
2230                             &error_fatal);
2231     object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
2232                              &error_abort);
2233     qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
2234     if (local_err) {
2235         error_propagate(errp, local_err);
2236         return;
2237     }
2238 
2239     /* XSCOM region, used for initial configuration of the BARs */
2240     memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
2241                           &pnv_xive2_xscom_ops, xive, "xscom-xive",
2242                           PNV10_XSCOM_XIVE2_SIZE << 3);
2243 
2244     /* Interrupt controller MMIO regions */
2245     xive->ic_shift = 16;
2246     memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
2247                        PNV10_XIVE2_IC_SIZE);
2248 
2249     for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
2250         memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
2251                          pnv_xive2_ic_regions[i].ops, xive,
2252                          pnv_xive2_ic_regions[i].name,
2253                          pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
2254     }
2255 
2256     /*
2257      * VC MMIO regions.
2258      */
2259     xive->esb_shift = 16;
2260     xive->end_shift = 16;
2261     memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
2262                        PNV10_XIVE2_ESB_SIZE);
2263     memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
2264                        PNV10_XIVE2_END_SIZE);
2265 
2266     /* Presenter Controller MMIO region (not modeled) */
2267     xive->nvc_shift = 16;
2268     xive->nvpg_shift = 16;
2269     memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
2270                           &pnv_xive2_nvc_ops, xive,
2271                           "xive-nvc", PNV10_XIVE2_NVC_SIZE);
2272 
2273     memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
2274                           &pnv_xive2_nvpg_ops, xive,
2275                           "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);
2276 
2277     /* Thread Interrupt Management Area (Direct) */
2278     xive->tm_shift = 16;
2279     memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
2280                           xive, "xive-tima", PNV10_XIVE2_TM_SIZE);
2281 
2282     qemu_register_reset(pnv_xive2_reset, dev);
2283 }
2284 
2285 static Property pnv_xive2_properties[] = {
2286     DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
2287     DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
2288     DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
2289     DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
2290     DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
2291     DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
2292     DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
2293                        PNV_XIVE2_CAPABILITIES),
2294     DEFINE_PROP_UINT64("config", PnvXive2, config,
2295                        PNV_XIVE2_CONFIGURATION),
2296     DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
2297     DEFINE_PROP_END_OF_LIST(),
2298 };
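
/*
 * Minimal wiring sketch (BAR value hypothetical), along the lines of the
 * chip realize code in hw/ppc/pnv.c:
 *
 *     object_property_set_link(OBJECT(xive), "chip", OBJECT(chip),
 *                              &error_abort);
 *     object_property_set_int(OBJECT(xive), "ic-bar", 0x0006030200000000ull,
 *                             &error_fatal);
 *     qdev_realize(DEVICE(xive), NULL, &error_fatal);
 */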
2299 
2300 static void pnv_xive2_instance_init(Object *obj)
2301 {
2302     PnvXive2 *xive = PNV_XIVE2(obj);
2303 
2304     object_initialize_child(obj, "ipi_source", &xive->ipi_source,
2305                             TYPE_XIVE_SOURCE);
2306     object_initialize_child(obj, "end_source", &xive->end_source,
2307                             TYPE_XIVE2_END_SOURCE);
2308 }
2309 
2310 static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
2311                               int xscom_offset)
2312 {
2313     const char compat_p10[] = "ibm,power10-xive-x";
2314     char *name;
2315     int offset;
2316     uint32_t reg[] = {
2317         cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
2318         cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
2319     };
2320 
2321     name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
2322     offset = fdt_add_subnode(fdt, xscom_offset, name);
2323     _FDT(offset);
2324     g_free(name);
2325 
2326     _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
2327     _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
2328                      sizeof(compat_p10)));
2329     return 0;
2330 }
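
/*
 * Schematically, the node added above (base and size filled in from
 * PNV10_XSCOM_XIVE2_BASE and PNV10_XSCOM_XIVE2_SIZE):
 *
 *     xive@<base> {
 *         reg = <base size>;
 *         compatible = "ibm,power10-xive-x";
 *     };
 */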
2331 
2332 static void pnv_xive2_class_init(ObjectClass *klass, void *data)
2333 {
2334     DeviceClass *dc = DEVICE_CLASS(klass);
2335     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
2336     Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
2337     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
2338     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
2339     PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);
2340 
2341     xdc->dt_xscom  = pnv_xive2_dt_xscom;
2342 
2343     dc->desc       = "PowerNV XIVE2 Interrupt Controller (POWER10)";
2344     device_class_set_parent_realize(dc, pnv_xive2_realize,
2345                                     &pxc->parent_realize);
2346     device_class_set_props(dc, pnv_xive2_properties);
2347 
2348     xrc->get_eas   = pnv_xive2_get_eas;
2349     xrc->get_pq    = pnv_xive2_get_pq;
2350     xrc->set_pq    = pnv_xive2_set_pq;
2351     xrc->get_end   = pnv_xive2_get_end;
2352     xrc->write_end = pnv_xive2_write_end;
2353     xrc->get_nvp   = pnv_xive2_get_nvp;
2354     xrc->write_nvp = pnv_xive2_write_nvp;
2355     xrc->get_config  = pnv_xive2_get_config;
2356     xrc->get_block_id = pnv_xive2_get_block_id;
2357 
2358     xnc->notify    = pnv_xive2_notify;
2359 
2360     xpc->match_nvt  = pnv_xive2_match_nvt;
2361     xpc->get_config = pnv_xive2_presenter_get_config;
2362 }
2363 
2364 static const TypeInfo pnv_xive2_info = {
2365     .name          = TYPE_PNV_XIVE2,
2366     .parent        = TYPE_XIVE2_ROUTER,
2367     .instance_init = pnv_xive2_instance_init,
2368     .instance_size = sizeof(PnvXive2),
2369     .class_init    = pnv_xive2_class_init,
2370     .class_size    = sizeof(PnvXive2Class),
2371     .interfaces    = (InterfaceInfo[]) {
2372         { TYPE_PNV_XSCOM_INTERFACE },
2373         { }
2374     }
2375 };
2376 
2377 static void pnv_xive2_register_types(void)
2378 {
2379     type_register_static(&pnv_xive2_info);
2380 }
2381 
2382 type_init(pnv_xive2_register_types)
2383 
2384 static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
2385                                      GString *buf)
2386 {
2387     uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
2388     uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
2389 
2390     if (!xive2_nvp_is_valid(nvp)) {
2391         return;
2392     }
2393 
2394     g_string_append_printf(buf, "  %08x end:%02x/%04x IPB:%02x",
2395                            nvp_idx, eq_blk, eq_idx,
2396                            xive_get_field32(NVP2_W2_IPB, nvp->w2));
2397     /*
2398      * When the NVP is HW controlled, more fields are updated
2399      */
2400     if (xive2_nvp_is_hw(nvp)) {
2401         g_string_append_printf(buf, " CPPR:%02x",
2402                                xive_get_field32(NVP2_W2_CPPR, nvp->w2));
2403         if (xive2_nvp_is_co(nvp)) {
2404             g_string_append_printf(buf, " CO:%04x",
2405                                    xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
2406         }
2407     }
2408     g_string_append_c(buf, '\n');
2409 }
2410 
2411 /*
2412  * If the table is direct, we can compute the number of PQ entries
2413  * provisioned by FW.
2414  */
2415 static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
2416 {
2417     uint8_t blk = pnv_xive2_block_id(xive);
2418     uint64_t vsd = xive->vsds[VST_ESB][blk];
2419     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
2420 
2421     return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
2422 }
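
/*
 * Example: a direct ESB table of one 64K page (VSD_TSIZE encoding 4,
 * so vst_tsize = 1 << 16) backs 64 KiB * SBE_PER_BYTE = 256K PQ pairs,
 * i.e. 256K interrupt sources.
 */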
2423 
2424 /*
2425  * Compute the number of entries per indirect subpage.
2426  */
2427 static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
2428 {
2429     uint8_t blk = pnv_xive2_block_id(xive);
2430     uint64_t vsd = xive->vsds[type][blk];
2431     const XiveVstInfo *info = &vst_infos[type];
2432     uint64_t vsd_addr;
2433     uint32_t page_shift;
2434 
2435     /* For direct tables, fake a valid value */
2436     if (!(VSD_INDIRECT & vsd)) {
2437         return 1;
2438     }
2439 
2440     /* Get the page size of the indirect table. */
2441     vsd_addr = vsd & VSD_ADDRESS_MASK;
2442     ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
2443 
2444     if (!(vsd & VSD_ADDRESS_MASK)) {
2445 #ifdef XIVE2_DEBUG
2446         xive2_error(xive, "VST: invalid %s entry!?", info->name);
2447 #endif
2448         return 0;
2449     }
2450 
2451     page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
2452 
2453     if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
2454         xive2_error(xive, "VST: invalid %s page shift %d", info->name,
2455                    page_shift);
2456         return 0;
2457     }
2458 
2459     return (1ull << page_shift) / info->size;
2460 }
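
/*
 * Example, assuming the 32-byte Xive2Nvp entry: with 64K indirect
 * subpages (page_shift = 16), each subpage holds 65536 / 32 = 2048
 * NVPs, which is the stride pnv_xive2_pic_print_info() uses below when
 * scanning the NVPT.
 */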
2461 
2462 void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
2463 {
2464     Xive2Router *xrtr = XIVE2_ROUTER(xive);
2465     uint8_t blk = pnv_xive2_block_id(xive);
2466     uint8_t chip_id = xive->chip->chip_id;
2467     uint32_t srcno0 = XIVE_EAS(blk, 0);
2468     uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
2469     Xive2Eas eas;
2470     Xive2End end;
2471     Xive2Nvp nvp;
2472     int i;
2473     uint64_t xive_nvp_per_subpage;
2474 
2475     g_string_append_printf(buf, "XIVE[%x] Source %08x .. %08x\n",
2476                            blk, srcno0, srcno0 + nr_esbs - 1);
2477     xive_source_pic_print_info(&xive->ipi_source, srcno0, buf);
2478 
2479     g_string_append_printf(buf, "XIVE[%x] EAT %08x .. %08x\n",
2480                            blk, srcno0, srcno0 + nr_esbs - 1);
2481     for (i = 0; i < nr_esbs; i++) {
2482         if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
2483             break;
2484         }
2485         if (!xive2_eas_is_masked(&eas)) {
2486             xive2_eas_pic_print_info(&eas, i, buf);
2487         }
2488     }
2489 
2490     g_string_append_printf(buf, "XIVE[%x] #%d END Escalation EAT\n",
2491                            chip_id, blk);
2492     i = 0;
2493     while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2494         xive2_end_eas_pic_print_info(&end, i++, buf);
2495     }
2496 
2497     g_string_append_printf(buf, "XIVE[%x] #%d ENDT\n", chip_id, blk);
2498     i = 0;
2499     while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2500         xive2_end_pic_print_info(&end, i++, buf);
2501     }
2502 
2503     g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n",
2504                            chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
2505     xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
2506     for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
2507         while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
2508             xive2_nvp_pic_print_info(&nvp, i++, buf);
2509         }
2510     }
2511 }
2512