/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"
#include "sysemu/qtest.h"

#include <libfdt.h>

#include "pnv_xive2_regs.h"

#undef XIVE2_DEBUG

/* XIVE Sync or Flush Notification Block */
typedef struct XiveSfnBlock {
    uint8_t bytes[32];
} XiveSfnBlock;

/* XIVE Thread Sync or Flush Notification Area */
typedef struct XiveThreadNA {
    XiveSfnBlock topo[16];
} XiveThreadNA;

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4 /* 2-bit PQ state entries, so 4 per byte */

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {

    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),     16 },
    [VST_ESB]  = { "ESB",  1,                    16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),     16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),     16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc),    16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc),    16 },

    [VST_IC]   = { "IC",   1, /* ? */            16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", sizeof(XiveThreadNA), 16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * fifos of the VC sub-engine in case of overflow.
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - NxC,
     * 3 - INT,
     * 4 - OS-Queue,
     * 5 - Pool-Queue,
     * 6 - Hard-Queue
     */
    [VST_ERQ]  = { "ERQ",  1,                   VC_QUEUE_COUNT },
};

#define xive2_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__)

/*
 * TODO: Document block id override
 */
static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];

    if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
        blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
        PnvXive2 *xive = &chip10->xive;

        if (pnv_xive2_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for ESB, EAT, ENDT, NVP
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8
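
/*
 * Illustrative lookup, assuming 64k indirect pages (page_shift = 16)
 * and 32-byte END entries: each page then holds 2048 entries, so END
 * index 5000 selects VSD 5000 / 2048 = 2 in the indirect table and
 * entry 5000 % 2048 = 904 in the direct page it points to.
 */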

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                    info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page sizes differ !?",
                        info->name, idx);
            return 0;
        }
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

static uint8_t pnv_xive2_nvc_table_compress_shift(PnvXive2 *xive)
{
    uint8_t shift = GETFIELD(PC_NXC_PROC_CONFIG_NVC_TABLE_COMPRESS,
                             xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
    return shift > 8 ? 0 : shift;
}

static uint8_t pnv_xive2_nvg_table_compress_shift(PnvXive2 *xive)
{
    uint8_t shift = GETFIELD(PC_NXC_PROC_CONFIG_NVG_TABLE_COMPRESS,
                             xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
    return shift > 8 ? 0 : shift;
}

static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                    blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    if (type == VST_NVG) {
        idx >>= pnv_xive2_nvg_table_compress_shift(xive);
    } else if (type == VST_NVC) {
        idx >>= pnv_xive2_nvc_table_compress_shift(xive);
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    result = address_space_read(&address_space_memory, addr,
                                MEMTXATTRS_UNSPECIFIED, data,
                                info->size);
    if (result != MEMTX_OK) {
        xive2_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                    " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        result = address_space_write(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED, data,
                                     info->size);
    } else {
        result = address_space_write(&address_space_memory,
                                     addr + word_number * 4,
                                     MEMTXATTRS_UNSPECIFIED,
                                     data + word_number * 4, 4);
    }

    if (result != MEMTX_OK) {
        xive2_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
                    " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}
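
/*
 * A word_number of XIVE_VST_WORD_ALL writes the whole entry. Any
 * other value updates a single 4-byte word: e.g. flushing only word 1
 * of an END entry writes 4 bytes at addr + 4 and leaves the rest of
 * the entry in memory untouched.
 */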

static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
}

static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
                               word_number);
}

static inline int pnv_xive2_get_current_pir(PnvXive2 *xive)
{
    if (!qtest_enabled()) {
        PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
        return ppc_cpu_pir(cpu);
    }
    return 0;
}

/*
 * After SW injects a Queue Sync or Cache Flush operation, HW notifies
 * SW of its completion by writing a byte of all 1's (0xff) to a
 * specific memory location. That location is found by first looking
 * up a base address in the SYNC VSD, using the Topology ID of the
 * originating thread as the "block" number. The base points to a 64k
 * block of memory that is further divided into 128 chunks of 512
 * bytes, indexed by the thread id of the requesting thread. Each
 * 512-byte chunk is in turn divided into 16 chunks of 32 bytes,
 * indexed by the topology id of the targeted IC's chip. The values
 * below are the offsets into such a 32-byte chunk for each type of
 * cache flush or queue sync operation.
 */
#define PNV_XIVE2_QUEUE_IPI              0x00
#define PNV_XIVE2_QUEUE_HW               0x01
#define PNV_XIVE2_QUEUE_NXC              0x02
#define PNV_XIVE2_QUEUE_INT              0x03
#define PNV_XIVE2_QUEUE_OS               0x04
#define PNV_XIVE2_QUEUE_POOL             0x05
#define PNV_XIVE2_QUEUE_HARD             0x06
#define PNV_XIVE2_CACHE_ENDC             0x08
#define PNV_XIVE2_CACHE_ESBC             0x09
#define PNV_XIVE2_CACHE_EASC             0x0a
#define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO   0x10
#define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO    0x11
#define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI   0x12
#define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI    0x13
#define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI   0x14
#define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI    0x15
#define PNV_XIVE2_CACHE_NXC              0x18
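
/*
 * Illustrative address computation, following the layout above: a
 * request from a thread with topology id 1 and thread id 9, targeting
 * the IC of chip 2, has its ENDC cache flush notification written at
 *
 *   pnv_xive2_vst_addr(xive, VST_SYNC, 1, 9)    (512-byte chunk)
 *     + 2 * sizeof(XiveSfnBlock)                (+ 64)
 *     + PNV_XIVE2_CACHE_ENDC                    (+ 8)
 */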

static int pnv_xive2_inject_notify(PnvXive2 *xive, int type)
{
    uint64_t addr;
    int pir = pnv_xive2_get_current_pir(xive);
    int thread_nr = PNV10_PIR2THREAD(pir);
    int thread_topo_id = PNV10_PIR2CHIP(pir);
    int ic_topo_id = xive->chip->chip_id;
    uint64_t offset = ic_topo_id * sizeof(XiveSfnBlock);
    uint8_t byte = 0xff;
    MemTxResult result;

    /* Retrieve the address of requesting thread's notification area */
    addr = pnv_xive2_vst_addr(xive, VST_SYNC, thread_topo_id, thread_nr);

    if (!addr) {
        xive2_error(xive, "VST: no SYNC entry %x/%x !?",
                    thread_topo_id, thread_nr);
        return -1;
    }

    address_space_stb(&address_space_memory, addr + offset + type, byte,
                      MEMTXATTRS_UNSPECIFIED, &result);
    assert(result == MEMTX_OK);

    return 0;
}

static int pnv_xive2_end_update(PnvXive2 *xive, uint8_t watch_engine)
{
    uint8_t  blk;
    uint32_t idx;
    int i, spec_reg, data_reg;
    uint64_t endc_watch[4];

    assert(watch_engine < ARRAY_SIZE(endc_watch));

    spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
    idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] = cpu_to_be64(xive->vc_regs[data_reg + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                               XIVE_VST_WORD_ALL);
}
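
/*
 * The four cache watch engines are spaced 0x40 bytes apart in the
 * register file: e.g. for watch engine 2, the SPEC register sits at
 * VC_ENDC_WATCH0_SPEC + 0x80 and DATA0 at VC_ENDC_WATCH0_DATA0 +
 * 0x80, each shifted right by 3 to index the 8-byte vc_regs array.
 */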

static void pnv_xive2_end_cache_load(PnvXive2 *xive, uint8_t watch_engine)
{
    uint8_t  blk;
    uint32_t idx;
    uint64_t endc_watch[4] = { 0 };
    int i, spec_reg, data_reg;

    assert(watch_engine < ARRAY_SIZE(endc_watch));

    spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
    idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[data_reg + i] = be64_to_cpu(endc_watch[i]);
    }
}

static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
}

static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
                               word_number);
}

static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type)
{
    switch (nxc_type) {
    case PC_NXC_WATCH_NXC_NVP:
        *table_type = VST_NVP;
        break;
    case PC_NXC_WATCH_NXC_NVG:
        *table_type = VST_NVG;
        break;
    case PC_NXC_WATCH_NXC_NVC:
        *table_type = VST_NVC;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid table type for nxc operation\n");
        return -1;
    }
    return 0;
}

static int pnv_xive2_nxc_update(PnvXive2 *xive, uint8_t watch_engine)
{
    uint8_t  blk, nxc_type;
    uint32_t idx, table_type = -1;
    int i, spec_reg, data_reg;
    uint64_t nxc_watch[4];

    assert(watch_engine < ARRAY_SIZE(nxc_watch));

    spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
    blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
    idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);

    assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] = cpu_to_be64(xive->pc_regs[data_reg + i]);
    }

    return pnv_xive2_vst_write(xive, table_type, blk, idx, nxc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_nxc_cache_load(PnvXive2 *xive, uint8_t watch_engine)
{
    uint8_t  blk, nxc_type;
    uint32_t idx, table_type = -1;
    uint64_t nxc_watch[4] = { 0 };
    int i, spec_reg, data_reg;

    assert(watch_engine < ARRAY_SIZE(nxc_watch));

    spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
    blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
    idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);

    assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));

    if (pnv_xive2_vst_read(xive, table_type, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NXC entry %x/%x in %s table !?",
                    blk, idx, vst_infos[table_type].name);
    }

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[data_reg + i] = be64_to_cpu(nxc_watch[i]);
    }
}

static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}

static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE2_GEN1_TIMA_OS;
    }

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
        cfg |= XIVE2_VP_SAVE_RESTORE;
    }

    if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
              xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
        cfg |= XIVE2_THREADID_8BITS;
    }

    return cfg;
}

static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
    uint32_t bit = pir & 0x3f;

    return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}

static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            if (gen1_tima_os) {
                ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                 nvt_idx, cam_ignore,
                                                 logic_serv);
            } else {
                ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                  nvt_idx, cam_ignore,
                                                  logic_serv);
            }

            /*
             * Save the context and keep searching to catch
             * duplicates, which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return false;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;
    }
    return cfg;
}

static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
    return pnv_xive2_block_id(PNV_XIVE2(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips and to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive2 *xive = PNV_XIVE2(xptr);

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources of the interrupt controller have no knowledge
 * of the XIVE2 chip on which they reside. Encode the block id in the
 * source interrupt number before forwarding the source event
 * notification to the Router. This is required on a multichip system.
 */
static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive2 *xive = PNV_XIVE2(xn);
    uint8_t blk = pnv_xive2_block_id(xive);

    xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}
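
/*
 * Illustrative sketch, assuming the usual 4-bit block encoding of
 * XIVE_EAS(): on chip 1 (block id 1), source number 0x20 is forwarded
 * to the router as EAS 0x10000020.
 */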

/*
 * Set Translation Tables
 *
 * TODO add support for multiple sets
 */
static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
{
    uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
    uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
                             xive->cq_regs[CQ_TAR >> 3]);

    switch (tsel) {
    case CQ_TAR_NVPG:
    case CQ_TAR_ESB:
    case CQ_TAR_END:
    case CQ_TAR_NVC:
        xive->tables[tsel][entry] = val;
        break;
    default:
        xive2_error(xive, "IC: unsupported table %d", tsel);
        return -1;
    }

    if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
        xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
                                              xive->cq_regs[CQ_TAR >> 3],
                                              ++entry);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                        page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (memory_region_is_mapped(&xsrc->esb_mmio)) {
            memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
        }
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS:  /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END.
         */
        if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
            memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
        }
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured here, as each uses
 * the Virtual Structure Tables.
 */
static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd,
                                   uint8_t type, uint8_t blk)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_ERQ) {
        xive2_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for"
                    " %s table", blk, vst_infos[type].name);
        return;
    }

    if (!vst_addr) {
        xive2_error(xive, "VST: invalid %s table address",
                    vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive2_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

static void pnv_xive2_vc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
                           xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);

    pnv_xive2_vst_set_data(xive, vsd, type, blk);
}

/*
 * MMIO handlers
 */

/*
 * IC BAR layout
 *
 * Page 0: Internal CQ register accesses (reads & writes)
 * Page 1: Internal VC register accesses (reads & writes)
 * Page 2: Internal PC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (reads & writes)
 * Page 4: Notify Port page (writes only, with data)
 * Page 5: Reserved
 * Page 6: Sync Poll page (writes only, dataless)
 * Page 7: Sync Inject page (writes only, dataless)
 * Page 8: LSI Trigger page (writes only, dataless)
 * Page 9: LSI SB Management page (reads & writes, dataless)
 * Pages 10-255: Reserved
 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
 *                covering the 128 threads of a P10 chip.
 * Pages 384-511: Reserved
 */
typedef struct PnvXive2Region {
    const char *name;
    uint32_t pgoff;
    uint32_t pgsize;
    const MemoryRegionOps *ops;
} PnvXive2Region;

static const MemoryRegionOps pnv_xive2_ic_cq_ops;
static const MemoryRegionOps pnv_xive2_ic_pc_ops;
static const MemoryRegionOps pnv_xive2_ic_vc_ops;
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
static const MemoryRegionOps pnv_xive2_ic_notify_ops;
static const MemoryRegionOps pnv_xive2_ic_sync_ops;
static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;

/* 512 pages. 4K: 2M range, 64K: 32M range */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",        0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",        1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",        2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",     3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",    4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",      6,   2,   &pnv_xive2_ic_sync_ops   },
    { "xive-ic-lsi",       8,   2,   &pnv_xive2_ic_lsi_ops    },
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
    /* pages 384-511 reserved */
};
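
/*
 * For example, with 64k IC pages (ic_shift = 16), "xive-ic-sync"
 * covers offsets 6 << 16 = 0x60000 up to 0x7ffff in the IC BAR, and
 * the indirect TIMA pages start at 256 << 16 = 0x1000000.
 */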

/*
 * CQ operations
 */

static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint64_t val = 0;

    switch (offset) {
    case CQ_XIVE_CAP: /* Set at reset */
    case CQ_XIVE_CFG:
        val = xive->cq_regs[reg];
        break;
    case CQ_MSGSND: /* TODO check the #cores of the machine */
        val = 0xffffffff00000000;
        break;
    case CQ_CFG_PB_GEN:
        val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
        break;
    default:
        xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
}
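
/* e.g. a CQ_BAR_RANGE value of 2 yields a 1ull << 26 = 64M BAR. */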

static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->cq_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint8_t pnv_xive2_cache_watch_assign(uint64_t engine_mask,
                                            uint64_t *state)
{
    uint8_t val = 0xFF;
    int i;

    for (i = 3; i >= 0; i--) {
        if (BIT(i) & engine_mask) {
            if (!(BIT(i) & *state)) {
                *state |= BIT(i);
                val = 3 - i;
                break;
            }
        }
    }
    return val;
}

static void pnv_xive2_cache_watch_release(uint64_t *state, uint8_t watch_engine)
{
    uint8_t engine_bit = 3 - watch_engine;

    if (*state & BIT(engine_bit)) {
        *state &= ~BIT(engine_bit);
    }
}
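
/*
 * Illustrative sketch: with engine_mask 0xF and *state 0 (all four
 * engines enabled, none busy), pnv_xive2_cache_watch_assign() returns
 * engine 0 and sets BIT(3) in *state; engine N is tracked by state
 * bit 3 - N, which is what the release path inverts.
 */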

static uint8_t pnv_xive2_endc_cache_watch_assign(PnvXive2 *xive)
{
    uint64_t engine_mask = GETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN,
                                    xive->vc_regs[VC_ENDC_CFG >> 3]);
    uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
    uint8_t val;

    /*
     * We keep track of which engines are currently busy in the
     * VC_ENDC_WATCH_ASSIGN register directly. When the firmware reads
     * the register, we don't return its value but the ID of an engine
     * it can use.
     * There are 4 engines. 0xFF means no engine is available.
     */
    val = pnv_xive2_cache_watch_assign(engine_mask, &state);
    if (val != 0xFF) {
        xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
    }
    return val;
}

static void pnv_xive2_endc_cache_watch_release(PnvXive2 *xive,
                                               uint8_t watch_engine)
{
    uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];

    pnv_xive2_cache_watch_release(&state, watch_engine);
    xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
}

static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;
    uint8_t watch_engine;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
        val = xive->vc_regs[reg];
        break;

    /*
     * ESB cache updates (not modeled)
     */
    case VC_ESBC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    case VC_ESBC_CFG:
        val = xive->vc_regs[reg];
        break;

    /*
     * EAS cache updates (not modeled)
     */
    case VC_EASC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH_ASSIGN:
        val = pnv_xive2_endc_cache_watch_assign(xive);
        break;

    case VC_ENDC_CFG:
        val = xive->vc_regs[reg];
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
    case VC_ENDC_WATCH1_SPEC:
    case VC_ENDC_WATCH2_SPEC:
    case VC_ENDC_WATCH3_SPEC:
        watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6;
        xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
        pnv_xive2_endc_cache_watch_release(xive, watch_engine);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA0:
    case VC_ENDC_WATCH1_DATA0:
    case VC_ENDC_WATCH2_DATA0:
    case VC_ENDC_WATCH3_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
        pnv_xive2_end_cache_load(xive, watch_engine);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
    case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
    case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
    case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL_MASK:
        val = xive->vc_regs[reg];
        break;

    case VC_AT_MACRO_KILL:
        xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        val = xive->vc_regs[reg];
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        val = VC_ENDC_SYNC_POLL_DONE;
        break;
    default:
        xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint8_t watch_engine;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
        pnv_xive2_vc_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        /* ESB update */
        break;

    case VC_ESBC_FLUSH_INJECT:
        pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ESBC);
        break;

    case VC_ESBC_CFG:
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        /* EAS update */
        break;

    case VC_EASC_FLUSH_INJECT:
        pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_EASC);
        break;

    case VC_ENDC_CFG:
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
    case VC_ENDC_WATCH1_SPEC:
    case VC_ENDC_WATCH2_SPEC:
    case VC_ENDC_WATCH3_SPEC:
        val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
    case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
    case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
    case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
    case VC_ENDC_WATCH1_DATA0:
    case VC_ENDC_WATCH2_DATA0:
    case VC_ENDC_WATCH3_DATA0:
        /* writing to DATA0 triggers the cache write */
        watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive, watch_engine);
        break;

    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    case VC_ENDC_FLUSH_INJECT:
        pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ENDC);
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->vc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint8_t pnv_xive2_nxc_cache_watch_assign(PnvXive2 *xive)
{
    uint64_t engine_mask = GETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN,
                                    xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
    uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
    uint8_t val;

    /*
     * We keep track of which engines are currently busy in the
     * PC_NXC_WATCH_ASSIGN register directly. When the firmware reads
     * the register, we don't return its value but the ID of an engine
     * it can use.
     * There are 4 engines. 0xFF means no engine is available.
     */
    val = pnv_xive2_cache_watch_assign(engine_mask, &state);
    if (val != 0xFF) {
        xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
    }
    return val;
}

static void pnv_xive2_nxc_cache_watch_release(PnvXive2 *xive,
                                              uint8_t watch_engine)
{
    uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];

    pnv_xive2_cache_watch_release(&state, watch_engine);
    xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
}

static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;
    uint8_t watch_engine;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH_ASSIGN:
        val = pnv_xive2_nxc_cache_watch_assign(xive);
        break;

    case PC_NXC_PROC_CONFIG:
        val = xive->pc_regs[reg];
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
    case PC_NXC_WATCH1_SPEC:
    case PC_NXC_WATCH2_SPEC:
    case PC_NXC_WATCH3_SPEC:
        watch_engine = (offset - PC_NXC_WATCH0_SPEC) >> 6;
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        pnv_xive2_nxc_cache_watch_release(xive, watch_engine);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
    case PC_NXC_WATCH1_DATA0:
    case PC_NXC_WATCH2_DATA0:
    case PC_NXC_WATCH3_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
        pnv_xive2_nxc_cache_load(xive, watch_engine);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
    case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
    case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
    case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_pc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t type = GETFIELD(PC_VSD_TABLE_SELECT,
                            xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(PC_VSD_TABLE_ADDRESS,
                           xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);

    pnv_xive2_vst_set_data(xive, vsd, type, blk);
}

static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint8_t watch_engine;

    switch (offset) {

    /*
     * VSD table settings.
     * The Xive2Router model combines both VC and PC sub-engines. We
     * allow the tables to be configured through either, for the rare
     * cases where a table only really needs to be configured for one
     * of them (e.g. the NVG table for the presenter). It assumes that
     * firmware passes the same address to the VC and PC when tables
     * are defined for both, which seems acceptable.
     */
    case PC_VSD_TABLE_ADDR:
        break;
    case PC_VSD_TABLE_DATA:
        pnv_xive2_pc_vst_set_data(xive, val);
        break;

    case PC_NXC_PROC_CONFIG:
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
    case PC_NXC_WATCH1_SPEC:
    case PC_NXC_WATCH2_SPEC:
    case PC_NXC_WATCH3_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
    case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
    case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
    case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
    case PC_NXC_WATCH1_DATA0:
    case PC_NXC_WATCH2_DATA0:
    case PC_NXC_WATCH3_DATA0:
        /* writing to DATA0 triggers the cache write */
        watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
        xive->pc_regs[reg] = val;
        pnv_xive2_nxc_update(xive, watch_engine);
        break;

    /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    case PC_NXC_FLUSH_INJECT:
        pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_NXC);
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->pc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};


static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
                                        unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0:
    case TCTXT_EN1:
        val = xive->tctxt_regs[reg];
        break;

    case TCTXT_EN0_SET:
    case TCTXT_EN0_RESET:
        val = xive->tctxt_regs[TCTXT_EN0 >> 3];
        break;
    case TCTXT_EN1_SET:
    case TCTXT_EN1_RESET:
        val = xive->tctxt_regs[TCTXT_EN1 >> 3];
        break;
    case TCTXT_CFG:
        val = xive->tctxt_regs[reg];
        break;
    default:
        xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
                                     uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * XIVE2 hardware thread enablement
     */
    case TCTXT_EN0: /* Physical Thread Enable */
    case TCTXT_EN1: /* Physical Thread Enable (fused core) */
        xive->tctxt_regs[reg] = val;
        break;

    case TCTXT_EN0_SET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
        break;
    case TCTXT_EN1_SET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
        break;
    case TCTXT_EN0_RESET:
        xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
        break;
    case TCTXT_EN1_RESET:
        xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
        break;
    case TCTXT_CFG:
        xive->tctxt_regs[reg] = val;
        break;
    default:
        xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
        return;
    }
}

static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
    .read = pnv_xive2_ic_tctxt_read,
    .write = pnv_xive2_ic_tctxt_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Redirect XSCOM to MMIO handlers
 */
static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    switch (xscom_reg) {
    case 0x000 ... 0x0FF:
        val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
        break;
    case 0x100 ... 0x1FF:
        val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
        break;
    case 0x200 ... 0x2FF:
        val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
        break;
    case 0x300 ... 0x3FF:
        val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
        break;
    default:
        xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}
1742 
1743 static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
1744                                   uint64_t val, unsigned size)
1745 {
1746     PnvXive2 *xive = PNV_XIVE2(opaque);
1747     uint32_t xscom_reg = offset >> 3;
1748     uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
1749 
1750     switch (xscom_reg) {
1751     case 0x000 ... 0x0FF:
1752         pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
1753         break;
1754     case 0x100 ... 0x1FF:
1755         pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
1756         break;
1757     case 0x200 ... 0x2FF:
1758         pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
1759         break;
1760     case 0x300 ... 0x3FF:
1761         pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
1762         break;
1763     default:
1764         xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
1765     }
1766 }
1767 
1768 static const MemoryRegionOps pnv_xive2_xscom_ops = {
1769     .read = pnv_xive2_xscom_read,
1770     .write = pnv_xive2_xscom_write,
1771     .endianness = DEVICE_BIG_ENDIAN,
1772     .valid = {
1773         .min_access_size = 8,
1774         .max_access_size = 8,
1775     },
1776     .impl = {
1777         .min_access_size = 8,
1778         .max_access_size = 8,
1779     },
1780 };
1781 
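#ifdef XIVE2_DEBUG
/*
 * Illustrative sketch, not part of the model: the decode performed
 * by the XSCOM handlers above. An 8-byte access at XSCOM offset
 * 0x1080 gives xscom_reg = 0x210, which selects the PC sub-engine
 * (range 0x200 - 0x2FF) at mmio_offset = (0x210 & 0xFF) << 3 = 0x80.
 */
static void pnv_xive2_xscom_decode_example(hwaddr offset)
{
    uint32_t xscom_reg = offset >> 3;
    uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;

    qemu_log("xscom reg 0x%x -> sub-engine %x mmio offset 0x%x\n",
             xscom_reg, xscom_reg >> 8, mmio_offset);
}
#endif
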
1782 /*
1783  * Notify port page. The layout is the same for 4K and 64K pages:
1784  *
1785  * Page 1           Notify page (writes only)
1786  *  0x000 - 0x7FF   IPI interrupt (NPU)
1787  *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
1788  */
1789 
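#ifdef XIVE2_DEBUG
/*
 * Illustrative sketch, not part of the model: composing the trigger
 * word a device would store to the notify page, using the XIVE_EAS()
 * helper already used by this file. The XIVE_TRIGGER_PQ bit
 * presumably indicates the PQ bits were already checked at the
 * source.
 */
static uint64_t pnv_xive2_trigger_encode_example(uint8_t blk, uint32_t idx,
                                                 bool pq_checked)
{
    uint64_t val = XIVE_EAS(blk, idx);

    if (pq_checked) {
        val |= XIVE_TRIGGER_PQ;
    }
    return val;
}
#endif
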
1790 static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
1791                                     uint64_t val)
1792 {
1793     uint8_t blk;
1794     uint32_t idx;
1795 
1796     if (val & XIVE_TRIGGER_END) {
1797         xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
1798                    addr, val);
1799         return;
1800     }
1801 
1802     /*
1803      * Forward the source event notification directly to the Router.
1804      * The source interrupt number should already be correctly encoded
1805      * with the chip block id by the sending device (PHB, PSI).
1806      */
1807     blk = XIVE_EAS_BLOCK(val);
1808     idx = XIVE_EAS_INDEX(val);
1809 
1810     xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
1811                          !!(val & XIVE_TRIGGER_PQ));
1812 }
1813 
1814 static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
1815                                       uint64_t val, unsigned size)
1816 {
1817     PnvXive2 *xive = PNV_XIVE2(opaque);
1818 
1819     /* VC: IPI triggers */
1820     switch (offset) {
1821     case 0x000 ... 0x7FF:
1822         /* TODO: check IPI notify sub-page routing */
1823         pnv_xive2_ic_hw_trigger(opaque, offset, val);
1824         break;
1825 
1826     /* VC: HW triggers */
1827     case 0x800 ... 0xFFF:
1828         pnv_xive2_ic_hw_trigger(opaque, offset, val);
1829         break;
1830 
1831     default:
1832         xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
1833     }
1834 }
1835 
1836 static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
1837                                          unsigned size)
1838 {
1839     PnvXive2 *xive = PNV_XIVE2(opaque);
1840 
1841     /* loads are invalid */
1842     xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
1843     return -1;
1844 }
1845 
1846 static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
1847     .read = pnv_xive2_ic_notify_read,
1848     .write = pnv_xive2_ic_notify_write,
1849     .endianness = DEVICE_BIG_ENDIAN,
1850     .valid = {
1851         .min_access_size = 8,
1852         .max_access_size = 8,
1853     },
1854     .impl = {
1855         .min_access_size = 8,
1856         .max_access_size = 8,
1857     },
1858 };
1859 
1860 static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
1861                                       unsigned size)
1862 {
1863     PnvXive2 *xive = PNV_XIVE2(opaque);
1864 
1865     xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
1866     return -1;
1867 }
1868 
1869 static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
1870                                    uint64_t val, unsigned size)
1871 {
1872     PnvXive2 *xive = PNV_XIVE2(opaque);
1873 
1874     xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
1875 }
1876 
1877 static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
1878     .read = pnv_xive2_ic_lsi_read,
1879     .write = pnv_xive2_ic_lsi_write,
1880     .endianness = DEVICE_BIG_ENDIAN,
1881     .valid = {
1882         .min_access_size = 8,
1883         .max_access_size = 8,
1884     },
1885     .impl = {
1886         .min_access_size = 8,
1887         .max_access_size = 8,
1888     },
1889 };
1890 
1891 /*
1892  * Sync MMIO page (write only)
1893  */
1894 #define PNV_XIVE2_SYNC_IPI              0x000
1895 #define PNV_XIVE2_SYNC_HW               0x080
1896 #define PNV_XIVE2_SYNC_NxC              0x100
1897 #define PNV_XIVE2_SYNC_INT              0x180
1898 #define PNV_XIVE2_SYNC_OS_ESC           0x200
1899 #define PNV_XIVE2_SYNC_POOL_ESC         0x280
1900 #define PNV_XIVE2_SYNC_HARD_ESC         0x300
1901 #define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO   0x800
1902 #define PNV_XIVE2_SYNC_NXC_LD_LCL_CO    0x880
1903 #define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI   0x900
1904 #define PNV_XIVE2_SYNC_NXC_ST_LCL_CI    0x980
1905 #define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI   0xA00
1906 #define PNV_XIVE2_SYNC_NXC_ST_RMT_CI    0xA80
1907 
1908 static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
1909                                        unsigned size)
1910 {
1911     PnvXive2 *xive = PNV_XIVE2(opaque);
1912 
1913     /* loads are invalid */
1914     xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
1915     return -1;
1916 }
1917 
1918 /*
1919  * The sync MMIO space spans two pages.  The lower page is used for
1920  * queue sync "poll" requests while the upper page is used for queue
1921  * sync "inject" requests.  Inject requests require the HW to write
1922  * a byte of all 1's to a predetermined location in memory in order
1923  * to signal completion of the request.  Both pages have the same
1924  * layout, so it is easiest to handle both with a single function.
1925  */
1926 static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
1927                                     uint64_t val, unsigned size)
1928 {
1929     PnvXive2 *xive = PNV_XIVE2(opaque);
1930     int inject_type;
1931     hwaddr pg_offset_mask = (1ull << xive->ic_shift) - 1;
1932 
1933     /* adjust offset for inject page */
1934     hwaddr adj_offset = offset & pg_offset_mask;
1935 
1936     switch (adj_offset) {
1937     case PNV_XIVE2_SYNC_IPI:
1938         inject_type = PNV_XIVE2_QUEUE_IPI;
1939         break;
1940     case PNV_XIVE2_SYNC_HW:
1941         inject_type = PNV_XIVE2_QUEUE_HW;
1942         break;
1943     case PNV_XIVE2_SYNC_NxC:
1944         inject_type = PNV_XIVE2_QUEUE_NXC;
1945         break;
1946     case PNV_XIVE2_SYNC_INT:
1947         inject_type = PNV_XIVE2_QUEUE_INT;
1948         break;
1949     case PNV_XIVE2_SYNC_OS_ESC:
1950         inject_type = PNV_XIVE2_QUEUE_OS;
1951         break;
1952     case PNV_XIVE2_SYNC_POOL_ESC:
1953         inject_type = PNV_XIVE2_QUEUE_POOL;
1954         break;
1955     case PNV_XIVE2_SYNC_HARD_ESC:
1956         inject_type = PNV_XIVE2_QUEUE_HARD;
1957         break;
1958     case PNV_XIVE2_SYNC_NXC_LD_LCL_NCO:
1959         inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO;
1960         break;
1961     case PNV_XIVE2_SYNC_NXC_LD_LCL_CO:
1962         inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_CO;
1963         break;
1964     case PNV_XIVE2_SYNC_NXC_ST_LCL_NCI:
1965         inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI;
1966         break;
1967     case PNV_XIVE2_SYNC_NXC_ST_LCL_CI:
1968         inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_CI;
1969         break;
1970     case PNV_XIVE2_SYNC_NXC_ST_RMT_NCI:
1971         inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI;
1972         break;
1973     case PNV_XIVE2_SYNC_NXC_ST_RMT_CI:
1974         inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI;
1975         break;
1976     default:
1977         xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
1978         return;
1979     }
1980 
1981     /* Write Queue Sync notification byte if writing to sync inject page */
1982     if ((offset & ~pg_offset_mask) != 0) {
1983         pnv_xive2_inject_notify(xive, inject_type);
1984     }
1985 }
1986 
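/*
 * For example, with 64k IC pages (ic_shift = 16), a store at offset
 * 0x10080 decodes as adj_offset 0x080 (PNV_XIVE2_SYNC_HW) on the
 * upper page, so a notification byte is written for the HW queue.
 * The same store at offset 0x00080, on the lower "poll" page, is
 * decoded but injects nothing.
 */
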
1987 static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
1988     .read = pnv_xive2_ic_sync_read,
1989     .write = pnv_xive2_ic_sync_write,
1990     .endianness = DEVICE_BIG_ENDIAN,
1991     .valid = {
1992         .min_access_size = 8,
1993         .max_access_size = 8,
1994     },
1995     .impl = {
1996         .min_access_size = 8,
1997         .max_access_size = 8,
1998     },
1999 };
2000 
2001 /*
2002  * When the TM direct pages of the IC controller are accessed, the
2003  * target HW thread is deduced from the page offset.
2004  */
2005 static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset)
2006 {
2007     /* On P10, the node ID shift in the PIR register is 8 bits */
2008     return xive->chip->chip_id << 8 | offset >> xive->ic_shift;
2009 }
2010 
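/*
 * For example, with 64k IC pages (ic_shift = 16) on chip 2, an
 * access to the fourth TM direct page (offset 0x30000) targets the
 * HW thread with PIR (2 << 8) | 3 = 0x203.
 */
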
2011 static uint32_t pnv_xive2_ic_tm_get_hw_page_offset(PnvXive2 *xive,
2012                                                    hwaddr offset)
2013 {
2014     /*
2015      * Indirect TIMA accesses are similar to direct accesses for
2016      * privilege ring 0. So remove any traces of the hw thread ID from
2017      * the offset in the IC BAR as it could be interpreted as the ring
2018      * privilege when calling the underlying direct access functions.
2019      */
2020     return offset & ((1ull << xive->ic_shift) - 1);
2021 }
2022 
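/*
 * For example, an indirect access at offset 0x30A40 (thread page 3
 * with 64k pages) is reduced to hw_page_offset 0x0A40 before being
 * handed to the direct TIMA accessors.
 */
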
2023 static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
2024 {
2025     PnvChip *chip = xive->chip;
2026     PowerPCCPU *cpu;
2027 
2028     cpu = pnv_chip_find_cpu(chip, pir);
2029     if (!cpu) {
2030         xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
2031         return NULL;
2032     }
2033 
2034     if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
2035         xive2_error(xive, "IC: CPU %x is not enabled", pir);
2036     }
2037 
2038     return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2039 }
2040 
2041 static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
2042                                               unsigned size)
2043 {
2044     PnvXive2 *xive = PNV_XIVE2(opaque);
2045     XivePresenter *xptr = XIVE_PRESENTER(xive);
2046     hwaddr hw_page_offset;
2047     uint32_t pir;
2048     XiveTCTX *tctx;
2049     uint64_t val = -1;
2050 
2051     pir = pnv_xive2_ic_tm_get_pir(xive, offset);
2052     hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
2053     tctx = pnv_xive2_get_indirect_tctx(xive, pir);
2054     if (tctx) {
2055         val = xive_tctx_tm_read(xptr, tctx, hw_page_offset, size);
2056     }
2057 
2058     return val;
2059 }
2060 
2061 static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
2062                                            uint64_t val, unsigned size)
2063 {
2064     PnvXive2 *xive = PNV_XIVE2(opaque);
2065     XivePresenter *xptr = XIVE_PRESENTER(xive);
2066     hwaddr hw_page_offset;
2067     uint32_t pir;
2068     XiveTCTX *tctx;
2069 
2070     pir = pnv_xive2_ic_tm_get_pir(xive, offset);
2071     hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
2072     tctx = pnv_xive2_get_indirect_tctx(xive, pir);
2073     if (tctx) {
2074         xive_tctx_tm_write(xptr, tctx, hw_page_offset, val, size);
2075     }
2076 }
2077 
2078 static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
2079     .read = pnv_xive2_ic_tm_indirect_read,
2080     .write = pnv_xive2_ic_tm_indirect_write,
2081     .endianness = DEVICE_BIG_ENDIAN,
2082     .valid = {
2083         .min_access_size = 1,
2084         .max_access_size = 8,
2085     },
2086     .impl = {
2087         .min_access_size = 1,
2088         .max_access_size = 8,
2089     },
2090 };
2091 
2092 /*
2093  * TIMA ops
2094  */
2095 static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
2096                                uint64_t value, unsigned size)
2097 {
2098     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
2099     PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
2100     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2101     XivePresenter *xptr = XIVE_PRESENTER(xive);
2102 
2103     xive_tctx_tm_write(xptr, tctx, offset, value, size);
2104 }
2105 
2106 static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
2107 {
2108     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
2109     PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
2110     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2111     XivePresenter *xptr = XIVE_PRESENTER(xive);
2112 
2113     return xive_tctx_tm_read(xptr, tctx, offset, size);
2114 }
2115 
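/*
 * Note that the direct TIMA handlers above resolve both the XIVE
 * controller and the thread interrupt context from the CPU doing
 * the access (current_cpu), not from the 'opaque' region owner.
 */
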
2116 static const MemoryRegionOps pnv_xive2_tm_ops = {
2117     .read = pnv_xive2_tm_read,
2118     .write = pnv_xive2_tm_write,
2119     .endianness = DEVICE_BIG_ENDIAN,
2120     .valid = {
2121         .min_access_size = 1,
2122         .max_access_size = 8,
2123     },
2124     .impl = {
2125         .min_access_size = 1,
2126         .max_access_size = 8,
2127     },
2128 };
2129 
2130 static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
2131                                    unsigned size)
2132 {
2133     PnvXive2 *xive = PNV_XIVE2(opaque);
2134 
2135     xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
2136     return -1;
2137 }
2138 
2139 static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
2140                                 uint64_t val, unsigned size)
2141 {
2142     PnvXive2 *xive = PNV_XIVE2(opaque);
2143 
2144     xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
2145 }
2146 
2147 static const MemoryRegionOps pnv_xive2_nvc_ops = {
2148     .read = pnv_xive2_nvc_read,
2149     .write = pnv_xive2_nvc_write,
2150     .endianness = DEVICE_BIG_ENDIAN,
2151     .valid = {
2152         .min_access_size = 8,
2153         .max_access_size = 8,
2154     },
2155     .impl = {
2156         .min_access_size = 8,
2157         .max_access_size = 8,
2158     },
2159 };
2160 
2161 static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
2162                                     unsigned size)
2163 {
2164     PnvXive2 *xive = PNV_XIVE2(opaque);
2165 
2166     xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
2167     return -1;
2168 }
2169 
2170 static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
2171                                  uint64_t val, unsigned size)
2172 {
2173     PnvXive2 *xive = PNV_XIVE2(opaque);
2174 
2175     xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
2176 }
2177 
2178 static const MemoryRegionOps pnv_xive2_nvpg_ops = {
2179     .read = pnv_xive2_nvpg_read,
2180     .write = pnv_xive2_nvpg_write,
2181     .endianness = DEVICE_BIG_ENDIAN,
2182     .valid = {
2183         .min_access_size = 8,
2184         .max_access_size = 8,
2185     },
2186     .impl = {
2187         .min_access_size = 8,
2188         .max_access_size = 8,
2189     },
2190 };
2191 
2192 /*
2193  * POWER10 default capabilities: 0x2000120076f000FC
2194  */
2195 #define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC
2196 
2197 /*
2198  * POWER10 default configuration: 0x0030000033000000
2199  *
2200  * 8bits thread id was dropped for P10
2201  */
2202 #define PNV_XIVE2_CONFIGURATION 0x0030000033000000
2203 
2204 static void pnv_xive2_reset(void *dev)
2205 {
2206     PnvXive2 *xive = PNV_XIVE2(dev);
2207     XiveSource *xsrc = &xive->ipi_source;
2208     Xive2EndSource *end_xsrc = &xive->end_source;
2209 
2210     xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
2211     xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;
2212 
2213     /* HW hardwires the topology # of the chip in the block field */
2214     xive->cq_regs[CQ_XIVE_CFG >> 3] |=
2215         SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);
2216 
2217     /* VC and PC cache watch assign mechanism */
2218     xive->vc_regs[VC_ENDC_CFG >> 3] =
2219         SETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN, 0ull, 0b0111);
2220     xive->pc_regs[PC_NXC_PROC_CONFIG >> 3] =
2221         SETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN, 0ull, 0b0111);
2222 
2223     /* Set default page size to 64k */
2224     xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
2225     xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;
2226 
2227     /* Clear source MMIOs */
2228     if (memory_region_is_mapped(&xsrc->esb_mmio)) {
2229         memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
2230     }
2231 
2232     if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
2233         memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
2234     }
2235 }
2236 
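/*
 * For example, after reset on chip 5, the block field of CQ_XIVE_CFG
 * holds 5 and pnv_xive2_block_id() returns the chip id, unless
 * firmware later enables the hard block id override.
 */
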
2237 /*
2238  *  Maximum number of IRQs and ENDs supported by HW. Will be tuned by
2239  *  software.
2240  */
2241 #define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
2242 #define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
2243 
2244 static void pnv_xive2_realize(DeviceState *dev, Error **errp)
2245 {
2246     PnvXive2 *xive = PNV_XIVE2(dev);
2247     PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
2248     XiveSource *xsrc = &xive->ipi_source;
2249     Xive2EndSource *end_xsrc = &xive->end_source;
2250     Error *local_err = NULL;
2251     int i;
2252 
2253     pxc->parent_realize(dev, &local_err);
2254     if (local_err) {
2255         error_propagate(errp, local_err);
2256         return;
2257     }
2258 
2259     assert(xive->chip);
2260 
2261     /*
2262      * The XiveSource and Xive2EndSource objects are realized with the
2263      * maximum allowed HW configuration. The ESB MMIO regions will be
2264      * resized dynamically when the controller is configured by the FW
2265      * to limit accesses to resources not provisioned.
2266      */
2267     object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
2268                             &error_fatal);
2269     object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
2270                             &error_fatal);
2271     object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
2272                              &error_fatal);
2273     qdev_realize(DEVICE(xsrc), NULL, &local_err);
2274     if (local_err) {
2275         error_propagate(errp, local_err);
2276         return;
2277     }
2278 
2279     object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
2280                             &error_fatal);
2281     object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
2282                              &error_abort);
2283     qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
2284     if (local_err) {
2285         error_propagate(errp, local_err);
2286         return;
2287     }
2288 
2289     /* XSCOM region, used for initial configuration of the BARs */
2290     memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
2291                           &pnv_xive2_xscom_ops, xive, "xscom-xive",
2292                           PNV10_XSCOM_XIVE2_SIZE << 3);
2293 
2294     /* Interrupt controller MMIO regions */
2295     xive->ic_shift = 16;
2296     memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
2297                        PNV10_XIVE2_IC_SIZE);
2298 
2299     for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
2300         memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
2301                          pnv_xive2_ic_regions[i].ops, xive,
2302                          pnv_xive2_ic_regions[i].name,
2303                          pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
2304     }
2305 
2306     /*
2307      * VC MMIO regions.
2308      */
2309     xive->esb_shift = 16;
2310     xive->end_shift = 16;
2311     memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
2312                        PNV10_XIVE2_ESB_SIZE);
2313     memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
2314                        PNV10_XIVE2_END_SIZE);
2315 
2316     /* Presenter Controller MMIO regions (not modeled) */
2317     xive->nvc_shift = 16;
2318     xive->nvpg_shift = 16;
2319     memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
2320                           &pnv_xive2_nvc_ops, xive,
2321                           "xive-nvc", PNV10_XIVE2_NVC_SIZE);
2322 
2323     memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
2324                           &pnv_xive2_nvpg_ops, xive,
2325                           "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);
2326 
2327     /* Thread Interrupt Management Area (Direct) */
2328     xive->tm_shift = 16;
2329     memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
2330                           xive, "xive-tima", PNV10_XIVE2_TM_SIZE);
2331 
2332     qemu_register_reset(pnv_xive2_reset, dev);
2333 }
2334 
2335 static Property pnv_xive2_properties[] = {
2336     DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
2337     DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
2338     DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
2339     DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
2340     DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
2341     DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
2342     DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
2343                        PNV_XIVE2_CAPABILITIES),
2344     DEFINE_PROP_UINT64("config", PnvXive2, config,
2345                        PNV_XIVE2_CONFIGURATION),
2346     DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
2347     DEFINE_PROP_END_OF_LIST(),
2348 };
2349 
2350 static void pnv_xive2_instance_init(Object *obj)
2351 {
2352     PnvXive2 *xive = PNV_XIVE2(obj);
2353 
2354     object_initialize_child(obj, "ipi_source", &xive->ipi_source,
2355                             TYPE_XIVE_SOURCE);
2356     object_initialize_child(obj, "end_source", &xive->end_source,
2357                             TYPE_XIVE2_END_SOURCE);
2358 }
2359 
2360 static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
2361                               int xscom_offset)
2362 {
2363     const char compat_p10[] = "ibm,power10-xive-x";
2364     char *name;
2365     int offset;
2366     uint32_t reg[] = {
2367         cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
2368         cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
2369     };
2370 
2371     name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
2372     offset = fdt_add_subnode(fdt, xscom_offset, name);
2373     _FDT(offset);
2374     g_free(name);
2375 
2376     _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
2377     _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
2378                      sizeof(compat_p10)));
2379     return 0;
2380 }
2381 
2382 static void pnv_xive2_class_init(ObjectClass *klass, void *data)
2383 {
2384     DeviceClass *dc = DEVICE_CLASS(klass);
2385     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
2386     Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
2387     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
2388     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
2389     PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);
2390 
2391     xdc->dt_xscom  = pnv_xive2_dt_xscom;
2392 
2393     dc->desc       = "PowerNV XIVE2 Interrupt Controller (POWER10)";
2394     device_class_set_parent_realize(dc, pnv_xive2_realize,
2395                                     &pxc->parent_realize);
2396     device_class_set_props(dc, pnv_xive2_properties);
2397 
2398     xrc->get_eas   = pnv_xive2_get_eas;
2399     xrc->get_pq    = pnv_xive2_get_pq;
2400     xrc->set_pq    = pnv_xive2_set_pq;
2401     xrc->get_end   = pnv_xive2_get_end;
2402     xrc->write_end = pnv_xive2_write_end;
2403     xrc->get_nvp   = pnv_xive2_get_nvp;
2404     xrc->write_nvp = pnv_xive2_write_nvp;
2405     xrc->get_config  = pnv_xive2_get_config;
2406     xrc->get_block_id = pnv_xive2_get_block_id;
2407 
2408     xnc->notify    = pnv_xive2_notify;
2409 
2410     xpc->match_nvt  = pnv_xive2_match_nvt;
2411     xpc->get_config = pnv_xive2_presenter_get_config;
2412 }
2413 
2414 static const TypeInfo pnv_xive2_info = {
2415     .name          = TYPE_PNV_XIVE2,
2416     .parent        = TYPE_XIVE2_ROUTER,
2417     .instance_init = pnv_xive2_instance_init,
2418     .instance_size = sizeof(PnvXive2),
2419     .class_init    = pnv_xive2_class_init,
2420     .class_size    = sizeof(PnvXive2Class),
2421     .interfaces    = (InterfaceInfo[]) {
2422         { TYPE_PNV_XSCOM_INTERFACE },
2423         { }
2424     }
2425 };
2426 
2427 static void pnv_xive2_register_types(void)
2428 {
2429     type_register_static(&pnv_xive2_info);
2430 }
2431 
2432 type_init(pnv_xive2_register_types)
2433 
2434 static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
2435                                      GString *buf)
2436 {
2437     uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
2438     uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
2439 
2440     if (!xive2_nvp_is_valid(nvp)) {
2441         return;
2442     }
2443 
2444     g_string_append_printf(buf, "  %08x end:%02x/%04x IPB:%02x",
2445                            nvp_idx, eq_blk, eq_idx,
2446                            xive_get_field32(NVP2_W2_IPB, nvp->w2));
2447     /*
2448      * When the NVP is HW controlled, more fields are updated
2449      */
2450     if (xive2_nvp_is_hw(nvp)) {
2451         g_string_append_printf(buf, " CPPR:%02x",
2452                                xive_get_field32(NVP2_W2_CPPR, nvp->w2));
2453         if (xive2_nvp_is_co(nvp)) {
2454             g_string_append_printf(buf, " CO:%04x",
2455                                    xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
2456         }
2457     }
2458     g_string_append_c(buf, '\n');
2459 }
2460 
2461 /*
2462  * If the table is direct, we can compute the number of PQ entries
2463  * provisioned by FW.
2464  */
2465 static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
2466 {
2467     uint8_t blk = pnv_xive2_block_id(xive);
2468     uint64_t vsd = xive->vsds[VST_ESB][blk];
2469     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
2470 
2471     return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
2472 }
2473 
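/*
 * For example, a direct ESB table with VSD_TSIZE = 4 spans
 * 1 << (4 + 12) = 64k bytes and provisions 64k * SBE_PER_BYTE =
 * 256k PQ entries.
 */
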
2474 /*
2475  * Compute the number of entries per indirect subpage.
2476  */
2477 static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
2478 {
2479     uint8_t blk = pnv_xive2_block_id(xive);
2480     uint64_t vsd = xive->vsds[type][blk];
2481     const XiveVstInfo *info = &vst_infos[type];
2482     uint64_t vsd_addr;
2483     uint32_t page_shift;
2484 
2485     /* For direct tables, fake a valid value */
2486     if (!(VSD_INDIRECT & vsd)) {
2487         return 1;
2488     }
2489 
2490     /* Get the page size of the indirect table. */
2491     vsd_addr = vsd & VSD_ADDRESS_MASK;
2492     ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
2493 
2494     if (!(vsd & VSD_ADDRESS_MASK)) {
2495 #ifdef XIVE2_DEBUG
2496         xive2_error(xive, "VST: invalid %s entry!?", info->name);
2497 #endif
2498         return 0;
2499     }
2500 
2501     page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
2502 
2503     if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
2504         xive2_error(xive, "VST: invalid %s page shift %d", info->name,
2505                    page_shift);
2506         return 0;
2507     }
2508 
2509     return (1ull << page_shift) / info->size;
2510 }
2511 
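/*
 * For example, an indirect NVP table with 64k subpages (page_shift =
 * 16) yields 65536 / sizeof(Xive2Nvp) entries per subpage, which is
 * the stride used to walk the NVPT in pnv_xive2_pic_print_info()
 * below.
 */
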
2512 void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
2513 {
2514     Xive2Router *xrtr = XIVE2_ROUTER(xive);
2515     uint8_t blk = pnv_xive2_block_id(xive);
2516     uint8_t chip_id = xive->chip->chip_id;
2517     uint32_t srcno0 = XIVE_EAS(blk, 0);
2518     uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
2519     Xive2Eas eas;
2520     Xive2End end;
2521     Xive2Nvp nvp;
2522     int i;
2523     uint64_t xive_nvp_per_subpage;
2524 
2525     g_string_append_printf(buf, "XIVE[%x] Source %08x .. %08x\n",
2526                            blk, srcno0, srcno0 + nr_esbs - 1);
2527     xive_source_pic_print_info(&xive->ipi_source, srcno0, buf);
2528 
2529     g_string_append_printf(buf, "XIVE[%x] EAT %08x .. %08x\n",
2530                            blk, srcno0, srcno0 + nr_esbs - 1);
2531     for (i = 0; i < nr_esbs; i++) {
2532         if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
2533             break;
2534         }
2535         if (!xive2_eas_is_masked(&eas)) {
2536             xive2_eas_pic_print_info(&eas, i, buf);
2537         }
2538     }
2539 
2540     g_string_append_printf(buf, "XIVE[%x] #%d END Escalation EAT\n",
2541                            chip_id, blk);
2542     i = 0;
2543     while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2544         xive2_end_eas_pic_print_info(&end, i++, buf);
2545     }
2546 
2547     g_string_append_printf(buf, "XIVE[%x] #%d ENDT\n", chip_id, blk);
2548     i = 0;
2549     while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2550         xive2_end_pic_print_info(&end, i++, buf);
2551     }
2552 
2553     g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n",
2554                            chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
2555     xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
2556     for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
2557         while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
2558             xive2_nvp_pic_print_info(&nvp, i++, buf);
2559         }
2560     }
2561 }
2562