/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"
#include "sysemu/qtest.h"

#include <libfdt.h>

#include "pnv_xive2_regs.h"

#undef XIVE2_DEBUG

/* XIVE Sync or Flush Notification Block */
typedef struct XiveSfnBlock {
    uint8_t bytes[32];
} XiveSfnBlock;

/* XIVE Thread Sync or Flush Notification Area */
typedef struct XiveThreadNA {
    XiveSfnBlock topo[16];
} XiveThreadNA;

/*
 * Virtual structures table (VST)
 */
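/* Each ESB entry is a 2-bit PQ state pair, so one byte backs 4 sources */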
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {

    [VST_EAS]  = { "EAT",  sizeof(Xive2Eas),     16 },
    [VST_ESB]  = { "ESB",  1,                    16 },
    [VST_END]  = { "ENDT", sizeof(Xive2End),     16 },

    [VST_NVP]  = { "NVPT", sizeof(Xive2Nvp),     16 },
    [VST_NVG]  = { "NVGT", sizeof(Xive2Nvgc),    16 },
    [VST_NVC]  = { "NVCT", sizeof(Xive2Nvgc),    16 },

    [VST_IC]   = { "IC",   1, /* ? */            16 }, /* Topology # */
    [VST_SYNC] = { "SYNC", sizeof(XiveThreadNA), 16 }, /* Topology # */

    /*
     * This table contains the backing store pages for the interrupt
     * fifos of the VC sub-engine in case of overflow.
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - NxC,
     * 3 - INT,
     * 4 - OS-Queue,
     * 5 - Pool-Queue,
     * 6 - Hard-Queue
     */
    [VST_ERQ]  = { "ERQ",  1,                    VC_QUEUE_COUNT },
};

#define xive2_error(xive, fmt, ...)                                     \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__)

/*
 * The block id of this controller defaults to the chip id. Firmware
 * can override it through the CQ_XIVE_CFG register.
 */
static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];

    if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
        blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
        PnvXive2 *xive = &chip10->xive;

        if (pnv_xive2_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for ESB, EAT, ENDT, NVP
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
                                          uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                    info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

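/*
 * Illustrative walk of an indirect lookup, assuming 64K indirect pages
 * and 32-byte END entries: vst_per_page = 0x10000 / 32 = 2048, so END
 * index 5000 lives in VSD slot 5000 / 2048 = 2, at offset
 * (5000 % 2048) * 32 within the page that VSD points to.
 */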
static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
                                            uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED);

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
            xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive2_error(xive, "VST: %s entry %x indirect page size differ !?",
                        info->name, idx);
            return 0;
        }
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
                                   uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
                    blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive2_get_remote(blk);

        return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    result = address_space_read(&address_space_memory, addr,
                                MEMTXATTRS_UNSPECIFIED, data,
                                info->size);
    if (result != MEMTX_OK) {
        xive2_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                    " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

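/* Pass as word_number to write the full entry instead of one 4-byte word */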
#define XIVE_VST_WORD_ALL -1

static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
                               uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        result = address_space_write(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED, data,
                                     info->size);
    } else {
        result = address_space_write(&address_space_memory,
                                     addr + word_number * 4,
                                     MEMTXATTRS_UNSPECIFIED,
                                     data + word_number * 4, 4);
    }

    if (result != MEMTX_OK) {
        xive2_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
                    " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                            uint8_t *pq)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2End *end)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
}

static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2End *end, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
                               word_number);
}

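/*
 * Return the PIR of the vCPU performing the access. Under qtest there
 * is no running vCPU, so default to 0.
 */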
static inline int pnv_xive2_get_current_pir(PnvXive2 *xive)
{
    if (!qtest_enabled()) {
        PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
        return ppc_cpu_pir(cpu);
    }
    return 0;
}

/*
 * After SW injects a Queue Sync or Cache Flush operation, HW notifies
 * SW of its completion by writing a byte of all 1's (0xff) to a
 * specific memory location.  The location is computed by first looking
 * up a base address in the SYNC VSD, using the Topology ID of the
 * originating thread as the "block" number.  This points to a 64k
 * block of memory that is further divided into 128 512-byte chunks,
 * indexed by the thread id of the requesting thread.  Finally, each
 * 512-byte chunk is divided into 16 32-byte chunks, indexed by the
 * topology id of the targeted IC's chip.  The values below are the
 * offsets into that 32-byte chunk for each type of cache flush or
 * queue sync operation.
 */
#define PNV_XIVE2_QUEUE_IPI              0x00
#define PNV_XIVE2_QUEUE_HW               0x01
#define PNV_XIVE2_QUEUE_NXC              0x02
#define PNV_XIVE2_QUEUE_INT              0x03
#define PNV_XIVE2_QUEUE_OS               0x04
#define PNV_XIVE2_QUEUE_POOL             0x05
#define PNV_XIVE2_QUEUE_HARD             0x06
#define PNV_XIVE2_CACHE_ENDC             0x08
#define PNV_XIVE2_CACHE_ESBC             0x09
#define PNV_XIVE2_CACHE_EASC             0x0a
#define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO   0x10
#define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO    0x11
#define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI   0x12
#define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI    0x13
#define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI   0x14
#define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI    0x15
#define PNV_XIVE2_CACHE_NXC              0x18

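/*
 * Illustrative computation (hypothetical values): a thread on chip 1
 * targeting the IC of chip 2 with an ENDC cache flush is notified at
 *
 *   pnv_xive2_vst_addr(xive, VST_SYNC, 1, thread_nr)
 *       + 2 * sizeof(XiveSfnBlock)      (32-byte chunk of chip 2)
 *       + PNV_XIVE2_CACHE_ENDC          (offset 0x08)
 *
 * where HW stores the 0xff completion byte.
 */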
static int pnv_xive2_inject_notify(PnvXive2 *xive, int type)
{
    uint64_t addr;
    int pir = pnv_xive2_get_current_pir(xive);
    int thread_nr = PNV10_PIR2THREAD(pir);
    int thread_topo_id = PNV10_PIR2CHIP(pir);
    int ic_topo_id = xive->chip->chip_id;
    uint64_t offset = ic_topo_id * sizeof(XiveSfnBlock);
    uint8_t byte = 0xff;
    MemTxResult result;

    /* Retrieve the address of requesting thread's notification area */
    addr = pnv_xive2_vst_addr(xive, VST_SYNC, thread_topo_id, thread_nr);

    if (!addr) {
        xive2_error(xive, "VST: no SYNC entry %x/%x !?",
                    thread_topo_id, thread_nr);
        return -1;
    }

    address_space_stb(&address_space_memory, addr + offset + type, byte,
                      MEMTXATTRS_UNSPECIFIED, &result);
    assert(result == MEMTX_OK);

    return 0;
}

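/*
 * END cache watch: firmware selects the target entry through the
 * WATCH_SPEC register (block/index) while the four WATCH_DATA
 * registers hold the END image. Writing DATA0 commits the entry to
 * the backing store; reading DATA0 loads it.
 */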
static int pnv_xive2_end_update(PnvXive2 *xive, uint8_t watch_engine)
{
    uint8_t  blk;
    uint32_t idx;
    int i, spec_reg, data_reg;
    uint64_t endc_watch[4];

    assert(watch_engine < ARRAY_SIZE(endc_watch));

    spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
    idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        endc_watch[i] = cpu_to_be64(xive->vc_regs[data_reg + i]);
    }

    return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_end_cache_load(PnvXive2 *xive, uint8_t watch_engine)
{
    uint8_t  blk;
    uint32_t idx;
    uint64_t endc_watch[4] = { 0 };
    int i, spec_reg, data_reg;

    assert(watch_engine < ARRAY_SIZE(endc_watch));

    spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
    idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);

    if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
        xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
        xive->vc_regs[data_reg + i] = be64_to_cpu(endc_watch[i]);
    }
}

static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Nvp *nvp)
{
    return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
}

static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                               Xive2Nvp *nvp, uint8_t word_number)
{
    return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
                               word_number);
}

static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type)
{
    switch (nxc_type) {
    case PC_NXC_WATCH_NXC_NVP:
        *table_type = VST_NVP;
        break;
    case PC_NXC_WATCH_NXC_NVG:
        *table_type = VST_NVG;
        break;
    case PC_NXC_WATCH_NXC_NVC:
        *table_type = VST_NVC;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid table type for nxc operation\n");
        return -1;
    }
    return 0;
}

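/*
 * NXC cache watch: same scheme as the END watch above, but for the
 * NVP/NVG/NVC tables of the PC sub-engine. The target table is
 * selected by the PC_NXC_WATCH_NXC_TYPE field of the SPEC register.
 */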
static int pnv_xive2_nxc_update(PnvXive2 *xive, uint8_t watch_engine)
{
    uint8_t  blk, nxc_type;
    uint32_t idx, table_type = -1;
    int i, spec_reg, data_reg;
    uint64_t nxc_watch[4];

    assert(watch_engine < ARRAY_SIZE(nxc_watch));

    spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
    blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
    idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);

    assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        nxc_watch[i] = cpu_to_be64(xive->pc_regs[data_reg + i]);
    }

    return pnv_xive2_vst_write(xive, table_type, blk, idx, nxc_watch,
                               XIVE_VST_WORD_ALL);
}

static void pnv_xive2_nxc_cache_load(PnvXive2 *xive, uint8_t watch_engine)
{
    uint8_t  blk, nxc_type;
    uint32_t idx, table_type = -1;
    uint64_t nxc_watch[4] = { 0 };
    int i, spec_reg, data_reg;

    assert(watch_engine < ARRAY_SIZE(nxc_watch));

    spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
    nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
    blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
    idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);

    assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));

    if (pnv_xive2_vst_read(xive, table_type, blk, idx, nxc_watch)) {
        xive2_error(xive, "VST: no NXC entry %x/%x in %s table !?",
                    blk, idx, vst_infos[table_type].name);
    }

    for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
        xive->pc_regs[data_reg + i] = be64_to_cpu(nxc_watch[i]);
    }
}

static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
                             Xive2Eas *eas)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);

    if (pnv_xive2_block_id(xive) != blk) {
        xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
}

static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
{
    PnvXive2 *xive = PNV_XIVE2(xrtr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE2_GEN1_TIMA_OS;
    }

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
        cfg |= XIVE2_VP_SAVE_RESTORE;
    }

    if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
              xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
        cfg |= XIVE2_THREADID_8BITS;
    }

    return cfg;
}

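/*
 * A thread is enabled when its bit is set in TCTXT_EN0 (fused cores
 * 0-7) or TCTXT_EN1 (the remaining fused cores); the low 6 bits of
 * the PIR select the bit within the register.
 */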
static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
    uint32_t bit = pir & 0x3f;

    return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}

static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint8_t priority,
                               uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            if (gen1_tima_os) {
                ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                 nvt_idx, cam_ignore,
                                                 logic_serv);
            } else {
                ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                                  nvt_idx, cam_ignore,
                                                  logic_serv);
            }

            /*
             * Save the context and follow on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return false;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}


static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
{
    PnvXive2 *xive = PNV_XIVE2(xptr);
    uint32_t cfg = 0;

    if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
        cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;
    }
    return cfg;
}

static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
{
    return pnv_xive2_block_id(PNV_XIVE2(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips. To identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive2 *xive = PNV_XIVE2(xptr);

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources of the interrupt controller have no knowledge
 * of the XIVE2 chip on which they reside. Encode the block id in the
 * source interrupt number before forwarding the source event
 * notification to the Router. This is required on a multichip system.
 */
static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive2 *xive = PNV_XIVE2(xn);
    uint8_t blk = pnv_xive2_block_id(xive);

    xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}


/*
 * Set Translation Tables
 *
 * TODO add support for multiple sets
 */
static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
{
    uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
    uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
                             xive->cq_regs[CQ_TAR >> 3]);

    switch (tsel) {
    case CQ_TAR_NVPG:
    case CQ_TAR_ESB:
    case CQ_TAR_END:
        xive->tables[tsel][entry] = val;
        break;
    default:
        xive2_error(xive, "IC: unsupported table %d", tsel);
        return -1;
    }

    if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
        xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
                                              xive->cq_regs[CQ_TAR >> 3],
                                              ++entry);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
                                        uint8_t blk, uint64_t vsd)
{
    Xive2EndSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
            xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                        page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive2_error(xive, "VST: %s table address 0x%"PRIx64
                    " is not aligned with page shift %d",
                    info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_ESB:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (memory_region_is_mapped(&xsrc->esb_mmio)) {
            memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
        }
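        /*
         * A direct table of vst_tsize bytes provisions
         * vst_tsize * SBE_PER_BYTE sources (4 PQ pairs per byte), each
         * backed by an ESB window of (1 << esb_shift) bytes.
         */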
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }

        memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_EAS:  /* Nothing to be done */
        break;

    case VST_END:
        /*
         * Backing store pages for the END.
         */
        if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
            memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
        }
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << end_xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
        break;

    case VST_NVP:  /* Not modeled */
    case VST_NVG:  /* Not modeled */
    case VST_NVC:  /* Not modeled */
    case VST_IC:   /* Not modeled */
    case VST_SYNC: /* Not modeled */
    case VST_ERQ:  /* Not modeled */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured through this routine,
 * as each uses the Virtual Structure Tables.
 */
static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd,
                                   uint8_t type, uint8_t blk)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_ERQ) {
        xive2_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive2_error(xive, "VST: invalid block id %d for"
                    " %s table", blk, vst_infos[type].name);
        return;
    }

    if (!vst_addr) {
        xive2_error(xive, "VST: invalid %s table address",
                    vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive2_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

static void pnv_xive2_vc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
                            xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
                           xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);

    pnv_xive2_vst_set_data(xive, vsd, type, blk);
}

/*
 * MMIO handlers
 */

/*
 * IC BAR layout
 *
 * Page 0: Internal CQ register accesses (reads & writes)
 * Page 1: Internal PC register accesses (reads & writes)
 * Page 2: Internal VC register accesses (reads & writes)
 * Page 3: Internal TCTXT (TIMA) reg accesses (reads & writes)
 * Page 4: Notify Port page (writes only, w/data),
 * Page 5: Reserved
 * Page 6: Sync Poll page (writes only, dataless)
 * Page 7: Sync Inject page (writes only, dataless)
 * Page 8: LSI Trigger page (writes only, dataless)
 * Page 9: LSI SB Management page (reads & writes, dataless)
 * Pages 10-255: Reserved
 * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
 *                covering the 128 threads in P10.
 * Pages 384-511: Reserved
 */
typedef struct PnvXive2Region {
    const char *name;
    uint32_t pgoff;
    uint32_t pgsize;
    const MemoryRegionOps *ops;
} PnvXive2Region;
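/* pgoff and pgsize are counted in IC pages of 4K or 64K, per CQ_IC_BAR_64K */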

static const MemoryRegionOps pnv_xive2_ic_cq_ops;
static const MemoryRegionOps pnv_xive2_ic_pc_ops;
static const MemoryRegionOps pnv_xive2_ic_vc_ops;
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
static const MemoryRegionOps pnv_xive2_ic_notify_ops;
static const MemoryRegionOps pnv_xive2_ic_sync_ops;
static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;

/* 512 pages. 4K: 2M range, 64K: 32M range */
static const PnvXive2Region pnv_xive2_ic_regions[] = {
    { "xive-ic-cq",        0,   1,   &pnv_xive2_ic_cq_ops     },
    { "xive-ic-vc",        1,   1,   &pnv_xive2_ic_vc_ops     },
    { "xive-ic-pc",        2,   1,   &pnv_xive2_ic_pc_ops     },
    { "xive-ic-tctxt",     3,   1,   &pnv_xive2_ic_tctxt_ops  },
    { "xive-ic-notify",    4,   1,   &pnv_xive2_ic_notify_ops },
    /* page 5 reserved */
    { "xive-ic-sync",      6,   2,   &pnv_xive2_ic_sync_ops   },
    { "xive-ic-lsi",       8,   2,   &pnv_xive2_ic_lsi_ops    },
    /* pages 10-255 reserved */
    { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
    /* pages 384-511 reserved */
};

/*
 * CQ operations
 */

static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint64_t val = 0;

    switch (offset) {
    case CQ_XIVE_CAP: /* Set at reset */
    case CQ_XIVE_CFG:
        val = xive->cq_regs[reg];
        break;
    case CQ_MSGSND: /* TODO check the #cores of the machine */
        val = 0xffffffff00000000;
        break;
    case CQ_CFG_PB_GEN:
        val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
        break;
    default:
        xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

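/* The BAR range field encodes the window size as 16MB << CQ_BAR_RANGE */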
static uint64_t pnv_xive2_bar_size(uint64_t val)
{
    return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
}

static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    int i;

    switch (offset) {
    case CQ_XIVE_CFG:
    case CQ_RST_CTL: /* TODO: reset all BARs */
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_del_subregion(&xive->ic_mmio,
                                                &xive->ic_mmios[i]);
                }
                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
                for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
                    memory_region_add_subregion(&xive->ic_mmio,
                               pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
                               &xive->ic_mmios[i]);
                }
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);
            }
        }
        break;

    case CQ_TM_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_ESB_BAR:
        xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->esb_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->esb_mmio);
            }
        } else {
            xive->esb_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->esb_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->esb_base,
                                            &xive->esb_mmio);
            }
        }
        break;

    case CQ_END_BAR:
        xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->end_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->end_mmio);
            }
        } else {
            xive->end_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->end_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->end_base,
                                            &xive->end_mmio);
            }
        }
        break;

    case CQ_NVC_BAR:
        xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvc_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvc_mmio);
            }
        } else {
            xive->nvc_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvc_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvc_base,
                                            &xive->nvc_mmio);
            }
        }
        break;

    case CQ_NVPG_BAR:
        xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
        if (!(val & CQ_BAR_VALID)) {
            xive->nvpg_base = 0;
            if (xive->cq_regs[reg] & CQ_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
            }
        } else {
            xive->nvpg_base = val & CQ_BAR_ADDR;
            if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
                memory_region_set_size(&xive->nvpg_mmio,
                                       pnv_xive2_bar_size(val));
                memory_region_add_subregion(sysmem, xive->nvpg_base,
                                            &xive->nvpg_mmio);
            }
        }
        break;

    case CQ_TAR: /* Set Translation Table Address */
        break;
    case CQ_TDR: /* Set Translation Table Data */
        pnv_xive2_stt_set_data(xive, val);
        break;
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    default:
        xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->cq_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
    .read = pnv_xive2_ic_cq_read,
    .write = pnv_xive2_ic_cq_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

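/*
 * Cache watch engine allocation. Engines are numbered 0-3 and engine
 * n is tracked by BIT(3 - n) in the mask and state words, so the scan
 * below hands out engine 0 first. 0xFF means no engine is free.
 */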
static uint8_t pnv_xive2_cache_watch_assign(uint64_t engine_mask,
                                            uint64_t *state)
{
    uint8_t val = 0xFF;
    int i;

    for (i = 3; i >= 0; i--) {
        if (BIT(i) & engine_mask) {
            if (!(BIT(i) & *state)) {
                *state |= BIT(i);
                val = 3 - i;
                break;
            }
        }
    }
    return val;
}

static void pnv_xive2_cache_watch_release(uint64_t *state, uint8_t watch_engine)
{
    uint8_t engine_bit = 3 - watch_engine;

    if (*state & BIT(engine_bit)) {
        *state &= ~BIT(engine_bit);
    }
}

static uint8_t pnv_xive2_endc_cache_watch_assign(PnvXive2 *xive)
{
    uint64_t engine_mask = GETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN,
                                    xive->vc_regs[VC_ENDC_CFG >> 3]);
    uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
    uint8_t val;

    /*
     * We keep track of which engines are currently busy in the
     * VC_ENDC_WATCH_ASSIGN register directly. When the firmware reads
     * the register, we don't return its value but the ID of an engine
     * it can use.
     * There are 4 engines. 0xFF means no engine is available.
     */
    val = pnv_xive2_cache_watch_assign(engine_mask, &state);
    if (val != 0xFF) {
        xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
    }
    return val;
}

static void pnv_xive2_endc_cache_watch_release(PnvXive2 *xive,
                                               uint8_t watch_engine)
{
    uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];

    pnv_xive2_cache_watch_release(&state, watch_engine);
    xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
}

static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;
    uint8_t watch_engine;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
        val = xive->vc_regs[reg];
        break;

    /*
     * ESB cache updates (not modeled)
     */
    case VC_ESBC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    case VC_ESBC_CFG:
        val = xive->vc_regs[reg];
        break;

    /*
     * EAS cache updates (not modeled)
     */
    case VC_EASC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH_ASSIGN:
        val = pnv_xive2_endc_cache_watch_assign(xive);
        break;

    case VC_ENDC_CFG:
        val = xive->vc_regs[reg];
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
    case VC_ENDC_WATCH1_SPEC:
    case VC_ENDC_WATCH2_SPEC:
    case VC_ENDC_WATCH3_SPEC:
        watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6;
        xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
        pnv_xive2_endc_cache_watch_release(xive, watch_engine);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA0:
    case VC_ENDC_WATCH1_DATA0:
    case VC_ENDC_WATCH2_DATA0:
    case VC_ENDC_WATCH3_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
        pnv_xive2_end_cache_load(xive, watch_engine);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
    case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
    case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
    case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_FLUSH_CTRL:
        xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL_MASK:
        val = xive->vc_regs[reg];
        break;

    case VC_AT_MACRO_KILL:
        xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        val = xive->vc_regs[reg];
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        val = VC_ENDC_SYNC_POLL_DONE;
        break;
    default:
        xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint8_t watch_engine;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
        pnv_xive2_vc_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        /* ESB update */
        break;

    case VC_ESBC_FLUSH_INJECT:
        pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ESBC);
        break;

    case VC_ESBC_CFG:
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        /* EAS update */
        break;

    case VC_EASC_FLUSH_INJECT:
        pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_EASC);
        break;

    case VC_ENDC_CFG:
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
    case VC_ENDC_WATCH1_SPEC:
    case VC_ENDC_WATCH2_SPEC:
    case VC_ENDC_WATCH3_SPEC:
        val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
    case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
    case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
    case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
    case VC_ENDC_WATCH1_DATA0:
    case VC_ENDC_WATCH2_DATA0:
    case VC_ENDC_WATCH3_DATA0:
        /* writing to DATA0 triggers the cache write */
        watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive, watch_engine);
        break;

    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    case VC_ENDC_FLUSH_INJECT:
        pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ENDC);
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->vc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static uint8_t pnv_xive2_nxc_cache_watch_assign(PnvXive2 *xive)
{
    uint64_t engine_mask = GETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN,
                                    xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
    uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
    uint8_t val;

    /*
     * We keep track of which engines are currently busy in the
     * PC_NXC_WATCH_ASSIGN register directly. When the firmware reads
     * the register, we don't return its value but the ID of an engine
     * it can use.
     * There are 4 engines. 0xFF means no engine is available.
     */
    val = pnv_xive2_cache_watch_assign(engine_mask, &state);
    if (val != 0xFF) {
        xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
    }
    return val;
}

static void pnv_xive2_nxc_cache_watch_release(PnvXive2 *xive,
                                              uint8_t watch_engine)
{
    uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];

    pnv_xive2_cache_watch_release(&state, watch_engine);
    xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
}

static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;
    uint8_t watch_engine;

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH_ASSIGN:
        val = pnv_xive2_nxc_cache_watch_assign(xive);
        break;

    case PC_NXC_PROC_CONFIG:
        val = xive->pc_regs[reg];
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
    case PC_NXC_WATCH1_SPEC:
    case PC_NXC_WATCH2_SPEC:
    case PC_NXC_WATCH3_SPEC:
        watch_engine = (offset - PC_NXC_WATCH0_SPEC) >> 6;
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        pnv_xive2_nxc_cache_watch_release(xive, watch_engine);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
    case PC_NXC_WATCH1_DATA0:
    case PC_NXC_WATCH2_DATA0:
    case PC_NXC_WATCH3_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
        pnv_xive2_nxc_cache_load(xive, watch_engine);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
    case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
    case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
    case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

static void pnv_xive2_pc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
{
    uint8_t type = GETFIELD(PC_VSD_TABLE_SELECT,
                            xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(PC_VSD_TABLE_ADDRESS,
                           xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);

    pnv_xive2_vst_set_data(xive, vsd, type, blk);
}

static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;
    uint8_t watch_engine;

    switch (offset) {

    /*
     * VSD table settings.
     * The Xive2Router model combines both VC and PC sub-engines. We
     * allow the tables to be configured through both, for the rare
     * cases where a table only really needs to be configured for one
     * of them (e.g. the NVG table for the presenter). It assumes that
     * firmware passes the same address to the VC and PC when tables
     * are defined for both, which seems acceptable.
     */
    case PC_VSD_TABLE_ADDR:
        break;
    case PC_VSD_TABLE_DATA:
        pnv_xive2_pc_vst_set_data(xive, val);
        break;

    case PC_NXC_PROC_CONFIG:
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
    case PC_NXC_WATCH1_SPEC:
    case PC_NXC_WATCH2_SPEC:
    case PC_NXC_WATCH3_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
    case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
    case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
    case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
    case PC_NXC_WATCH1_DATA0:
    case PC_NXC_WATCH2_DATA0:
    case PC_NXC_WATCH3_DATA0:
        /* writing to DATA0 triggers the cache write */
        watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
        xive->pc_regs[reg] = val;
        pnv_xive2_nxc_update(xive, watch_engine);
        break;

    /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    case PC_NXC_FLUSH_INJECT:
        pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_NXC);
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->pc_regs[reg] = val;
}

static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1605 
1606 
1607 static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
1608                                         unsigned size)
1609 {
1610     PnvXive2 *xive = PNV_XIVE2(opaque);
1611     uint64_t val = -1;
1612     uint32_t reg = offset >> 3;
1613 
1614     switch (offset) {
1615     /*
1616      * XIVE2 hardware thread enablement
1617      */
1618     case TCTXT_EN0:
1619     case TCTXT_EN1:
1620         val = xive->tctxt_regs[reg];
1621         break;
1622 
1623     case TCTXT_EN0_SET:
1624     case TCTXT_EN0_RESET:
1625         val = xive->tctxt_regs[TCTXT_EN0 >> 3];
1626         break;
1627     case TCTXT_EN1_SET:
1628     case TCTXT_EN1_RESET:
1629         val = xive->tctxt_regs[TCTXT_EN1 >> 3];
1630         break;
1631     case TCTXT_CFG:
1632         val = xive->tctxt_regs[reg];
1633         break;
1634     default:
1635         xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
1636     }
1637 
1638     return val;
1639 }
1640 
1641 static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
1642                                      uint64_t val, unsigned size)
1643 {
1644     PnvXive2 *xive = PNV_XIVE2(opaque);
1645     uint32_t reg = offset >> 3;
1646 
1647     switch (offset) {
1648     /*
1649      * XIVE2 hardware thread enablement
1650      */
1651     case TCTXT_EN0: /* Physical Thread Enable */
1652     case TCTXT_EN1: /* Physical Thread Enable (fused core) */
1653         xive->tctxt_regs[reg] = val;
1654         break;
1655 
1656     case TCTXT_EN0_SET:
1657         xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
1658         break;
1659     case TCTXT_EN1_SET:
1660         xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
1661         break;
1662     case TCTXT_EN0_RESET:
1663         xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
1664         break;
1665     case TCTXT_EN1_RESET:
1666         xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
1667         break;
1668     case TCTXT_CFG:
1669         xive->tctxt_regs[reg] = val;
1670         break;
1671     default:
1672         xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
1673         return;
1674     }
1675 }
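/*
 * Usage note: TCTXT_EN0_SET/TCTXT_EN1_SET OR the written mask into the
 * enable register, while TCTXT_EN0_RESET/TCTXT_EN1_RESET clear the bits
 * of the mask, so threads can be enabled or disabled without a
 * read-modify-write of TCTXT_EN0/1.
 */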
1676 
1677 static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
1678     .read = pnv_xive2_ic_tctxt_read,
1679     .write = pnv_xive2_ic_tctxt_write,
1680     .endianness = DEVICE_BIG_ENDIAN,
1681     .valid = {
1682         .min_access_size = 8,
1683         .max_access_size = 8,
1684     },
1685     .impl = {
1686         .min_access_size = 8,
1687         .max_access_size = 8,
1688     },
1689 };
1690 
1691 /*
1692  * Redirect XSCOM to MMIO handlers
1693  */
1694 static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
1695                                      unsigned size)
1696 {
1697     PnvXive2 *xive = PNV_XIVE2(opaque);
1698     uint64_t val = -1;
1699     uint32_t xscom_reg = offset >> 3;
1700     uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
1701 
1702     switch (xscom_reg) {
1703     case 0x000 ... 0x0FF:
1704         val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
1705         break;
1706     case 0x100 ... 0x1FF:
1707         val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
1708         break;
1709     case 0x200 ... 0x2FF:
1710         val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
1711         break;
1712     case 0x300 ... 0x3FF:
1713         val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
1714         break;
1715     default:
1716         xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
1717     }
1718 
1719     return val;
1720 }
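/*
 * Worked example: an XSCOM access at offset 0x818 is register 0x103,
 * which falls in the VC range above and is redirected to the VC MMIO
 * handler at offset 0x18.
 */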
1721 
1722 static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
1723                                   uint64_t val, unsigned size)
1724 {
1725     PnvXive2 *xive = PNV_XIVE2(opaque);
1726     uint32_t xscom_reg = offset >> 3;
1727     uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
1728 
1729     switch (xscom_reg) {
1730     case 0x000 ... 0x0FF:
1731         pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
1732         break;
1733     case 0x100 ... 0x1FF:
1734         pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
1735         break;
1736     case 0x200 ... 0x2FF:
1737         pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
1738         break;
1739     case 0x300 ... 0x3FF:
1740         pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
1741         break;
1742     default:
1743         xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
1744     }
1745 }
1746 
1747 static const MemoryRegionOps pnv_xive2_xscom_ops = {
1748     .read = pnv_xive2_xscom_read,
1749     .write = pnv_xive2_xscom_write,
1750     .endianness = DEVICE_BIG_ENDIAN,
1751     .valid = {
1752         .min_access_size = 8,
1753         .max_access_size = 8,
1754     },
1755     .impl = {
1756         .min_access_size = 8,
1757         .max_access_size = 8,
1758     },
1759 };
1760 
1761 /*
1762  * Notify port page. The layout is compatible between 4K and 64K pages:
1763  *
1764  * Page 1           Notify page (writes only)
1765  *  0x000 - 0x7FF   IPI interrupt (NPU)
1766  *  0x800 - 0xFFF   HW interrupt triggers (PSI, PHB)
1767  */
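/*
 * For example, a PHB on block 1 raising source 0x42 is expected to
 * write a trigger word where XIVE_EAS_BLOCK(val) == 1 and
 * XIVE_EAS_INDEX(val) == 0x42, which is forwarded below as
 * XIVE_EAS(1, 0x42) to the router.
 */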
1768 
1769 static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
1770                                     uint64_t val)
1771 {
1772     uint8_t blk;
1773     uint32_t idx;
1774 
1775     if (val & XIVE_TRIGGER_END) {
1776         xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
1777                     addr, val);
1778         return;
1779     }
1780 
1781     /*
1782      * Forward the source event notification directly to the Router.
1783      * The source interrupt number should already be correctly encoded
1784      * with the chip block id by the sending device (PHB, PSI).
1785      */
1786     blk = XIVE_EAS_BLOCK(val);
1787     idx = XIVE_EAS_INDEX(val);
1788 
1789     xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
1790                         !!(val & XIVE_TRIGGER_PQ));
1791 }
1792 
1793 static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
1794                                       uint64_t val, unsigned size)
1795 {
1796     PnvXive2 *xive = PNV_XIVE2(opaque);
1797 
1798     /* VC: IPI triggers */
1799     switch (offset) {
1800     case 0x000 ... 0x7FF:
1801         /* TODO: check IPI notify sub-page routing */
1802         pnv_xive2_ic_hw_trigger(opaque, offset, val);
1803         break;
1804 
1805     /* VC: HW triggers */
1806     case 0x800 ... 0xFFF:
1807         pnv_xive2_ic_hw_trigger(opaque, offset, val);
1808         break;
1809 
1810     default:
1811         xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
1812     }
1813 }
1814 
1815 static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
1816                                          unsigned size)
1817 {
1818     PnvXive2 *xive = PNV_XIVE2(opaque);
1819 
1820     /* loads are invalid */
1821     xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
1822     return -1;
1823 }
1824 
1825 static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
1826     .read = pnv_xive2_ic_notify_read,
1827     .write = pnv_xive2_ic_notify_write,
1828     .endianness = DEVICE_BIG_ENDIAN,
1829     .valid = {
1830         .min_access_size = 8,
1831         .max_access_size = 8,
1832     },
1833     .impl = {
1834         .min_access_size = 8,
1835         .max_access_size = 8,
1836     },
1837 };
1838 
1839 static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
1840                                       unsigned size)
1841 {
1842     PnvXive2 *xive = PNV_XIVE2(opaque);
1843 
1844     xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
1845     return -1;
1846 }
1847 
1848 static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
1849                                    uint64_t val, unsigned size)
1850 {
1851     PnvXive2 *xive = PNV_XIVE2(opaque);
1852 
1853     xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
1854 }
1855 
1856 static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
1857     .read = pnv_xive2_ic_lsi_read,
1858     .write = pnv_xive2_ic_lsi_write,
1859     .endianness = DEVICE_BIG_ENDIAN,
1860     .valid = {
1861         .min_access_size = 8,
1862         .max_access_size = 8,
1863     },
1864     .impl = {
1865         .min_access_size = 8,
1866         .max_access_size = 8,
1867     },
1868 };
1869 
1870 /*
1871  * Sync MMIO page (write only)
1872  */
1873 #define PNV_XIVE2_SYNC_IPI              0x000
1874 #define PNV_XIVE2_SYNC_HW               0x080
1875 #define PNV_XIVE2_SYNC_NxC              0x100
1876 #define PNV_XIVE2_SYNC_INT              0x180
1877 #define PNV_XIVE2_SYNC_OS_ESC           0x200
1878 #define PNV_XIVE2_SYNC_POOL_ESC         0x280
1879 #define PNV_XIVE2_SYNC_HARD_ESC         0x300
1880 #define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO   0x800
1881 #define PNV_XIVE2_SYNC_NXC_LD_LCL_CO    0x880
1882 #define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI   0x900
1883 #define PNV_XIVE2_SYNC_NXC_ST_LCL_CI    0x980
1884 #define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI   0xA00
1885 #define PNV_XIVE2_SYNC_NXC_ST_RMT_CI    0xA80
1886 
1887 static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
1888                                        unsigned size)
1889 {
1890     PnvXive2 *xive = PNV_XIVE2(opaque);
1891 
1892     /* loads are invalid */
1893     xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
1894     return -1;
1895 }
1896 
1897 /*
1898  * The sync MMIO space spans two pages.  The lower page is used for
1899  * queue sync "poll" requests while the upper page is used for queue
1900  * sync "inject" requests.  Inject requests require the HW to write
1901  * a byte of all 1's to a predetermined location in memory in order
1902  * to signal completion of the request.  Both pages have the same
1903  * layout, so it is easiest to handle both with a single function.
1904  */
1905 static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
1906                                     uint64_t val, unsigned size)
1907 {
1908     PnvXive2 *xive = PNV_XIVE2(opaque);
1909     int inject_type;
1910     hwaddr pg_offset_mask = (1ull << xive->ic_shift) - 1;
1911 
1912     /* adjust offset for inject page */
1913     hwaddr adj_offset = offset & pg_offset_mask;
1914 
1915     switch (adj_offset) {
1916     case PNV_XIVE2_SYNC_IPI:
1917         inject_type = PNV_XIVE2_QUEUE_IPI;
1918         break;
1919     case PNV_XIVE2_SYNC_HW:
1920         inject_type = PNV_XIVE2_QUEUE_HW;
1921         break;
1922     case PNV_XIVE2_SYNC_NxC:
1923         inject_type = PNV_XIVE2_QUEUE_NXC;
1924         break;
1925     case PNV_XIVE2_SYNC_INT:
1926         inject_type = PNV_XIVE2_QUEUE_INT;
1927         break;
1928     case PNV_XIVE2_SYNC_OS_ESC:
1929         inject_type = PNV_XIVE2_QUEUE_OS;
1930         break;
1931     case PNV_XIVE2_SYNC_POOL_ESC:
1932         inject_type = PNV_XIVE2_QUEUE_POOL;
1933         break;
1934     case PNV_XIVE2_SYNC_HARD_ESC:
1935         inject_type = PNV_XIVE2_QUEUE_HARD;
1936         break;
1937     case PNV_XIVE2_SYNC_NXC_LD_LCL_NCO:
1938         inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO;
1939         break;
1940     case PNV_XIVE2_SYNC_NXC_LD_LCL_CO:
1941         inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_CO;
1942         break;
1943     case PNV_XIVE2_SYNC_NXC_ST_LCL_NCI:
1944         inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI;
1945         break;
1946     case PNV_XIVE2_SYNC_NXC_ST_LCL_CI:
1947         inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_CI;
1948         break;
1949     case PNV_XIVE2_SYNC_NXC_ST_RMT_NCI:
1950         inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI;
1951         break;
1952     case PNV_XIVE2_SYNC_NXC_ST_RMT_CI:
1953         inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI;
1954         break;
1955     default:
1956         xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
1957         return;
1958     }
1959 
1960     /* Write Queue Sync notification byte if writing to sync inject page */
1961     if ((offset & ~pg_offset_mask) != 0) {
1962         pnv_xive2_inject_notify(xive, inject_type);
1963     }
1964 }
1965 
1966 static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
1967     .read = pnv_xive2_ic_sync_read,
1968     .write = pnv_xive2_ic_sync_write,
1969     .endianness = DEVICE_BIG_ENDIAN,
1970     .valid = {
1971         .min_access_size = 8,
1972         .max_access_size = 8,
1973     },
1974     .impl = {
1975         .min_access_size = 8,
1976         .max_access_size = 8,
1977     },
1978 };
1979 
1980 /*
1981  * When the TM direct pages of the IC controller are accessed, the
1982  * target HW thread is deduced from the page offset.
1983  */
1984 static uint32_t pnv_xive2_ic_tm_get_pir(PnvXive2 *xive, hwaddr offset)
1985 {
1986     /* On P10, the node ID shift in the PIR register is 8 bits */
1987     return xive->chip->chip_id << 8 | offset >> xive->ic_shift;
1988 }
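/*
 * e.g. with the default 64K IC pages, an access at offset 0x30000 on
 * chip 2 targets HW thread 3: PIR = (2 << 8) | 3 = 0x203.
 */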
1989 
1990 static uint32_t pnv_xive2_ic_tm_get_hw_page_offset(PnvXive2 *xive,
1991                                                    hwaddr offset)
1992 {
1993     /*
1994      * Indirect TIMA accesses are similar to direct accesses for
1995      * privilege ring 0. So remove any traces of the hw thread ID from
1996      * the offset in the IC BAR as it could be interpreted as the ring
1997      * privilege when calling the underlying direct access functions.
1998      */
1999     return offset & ((1ull << xive->ic_shift) - 1);
2000 }
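/*
 * e.g. an indirect access at offset 0x30123 (HW thread 3's page) is
 * presented to the TIMA handlers as offset 0x0123.
 */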
2001 
2002 static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
2003 {
2004     PnvChip *chip = xive->chip;
2005     PowerPCCPU *cpu;
2006 
2007     cpu = pnv_chip_find_cpu(chip, pir);
2008     if (!cpu) {
2009         xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
2010         return NULL;
2011     }
2012 
2013     if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
2014         xive2_error(xive, "IC: CPU %x is not enabled", pir);
2015     }
2016 
2017     return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2018 }
2019 
2020 static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
2021                                               unsigned size)
2022 {
2023     PnvXive2 *xive = PNV_XIVE2(opaque);
2024     XivePresenter *xptr = XIVE_PRESENTER(xive);
2025     hwaddr hw_page_offset;
2026     uint32_t pir;
2027     XiveTCTX *tctx;
2028     uint64_t val = -1;
2029 
2030     pir = pnv_xive2_ic_tm_get_pir(xive, offset);
2031     hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
2032     tctx = pnv_xive2_get_indirect_tctx(xive, pir);
2033     if (tctx) {
2034         val = xive_tctx_tm_read(xptr, tctx, hw_page_offset, size);
2035     }
2036 
2037     return val;
2038 }
2039 
2040 static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
2041                                            uint64_t val, unsigned size)
2042 {
2043     PnvXive2 *xive = PNV_XIVE2(opaque);
2044     XivePresenter *xptr = XIVE_PRESENTER(xive);
2045     hwaddr hw_page_offset;
2046     uint32_t pir;
2047     XiveTCTX *tctx;
2048 
2049     pir = pnv_xive2_ic_tm_get_pir(xive, offset);
2050     hw_page_offset = pnv_xive2_ic_tm_get_hw_page_offset(xive, offset);
2051     tctx = pnv_xive2_get_indirect_tctx(xive, pir);
2052     if (tctx) {
2053         xive_tctx_tm_write(xptr, tctx, hw_page_offset, val, size);
2054     }
2055 }
2056 
2057 static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
2058     .read = pnv_xive2_ic_tm_indirect_read,
2059     .write = pnv_xive2_ic_tm_indirect_write,
2060     .endianness = DEVICE_BIG_ENDIAN,
2061     .valid = {
2062         .min_access_size = 1,
2063         .max_access_size = 8,
2064     },
2065     .impl = {
2066         .min_access_size = 1,
2067         .max_access_size = 8,
2068     },
2069 };
2070 
2071 /*
2072  * TIMA ops
2073  */
2074 static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
2075                                uint64_t value, unsigned size)
2076 {
2077     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
2078     PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
2079     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2080     XivePresenter *xptr = XIVE_PRESENTER(xive);
2081 
2082     xive_tctx_tm_write(xptr, tctx, offset, value, size);
2083 }
2084 
2085 static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
2086 {
2087     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
2088     PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
2089     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
2090     XivePresenter *xptr = XIVE_PRESENTER(xive);
2091 
2092     return xive_tctx_tm_read(xptr, tctx, offset, size);
2093 }
2094 
2095 static const MemoryRegionOps pnv_xive2_tm_ops = {
2096     .read = pnv_xive2_tm_read,
2097     .write = pnv_xive2_tm_write,
2098     .endianness = DEVICE_BIG_ENDIAN,
2099     .valid = {
2100         .min_access_size = 1,
2101         .max_access_size = 8,
2102     },
2103     .impl = {
2104         .min_access_size = 1,
2105         .max_access_size = 8,
2106     },
2107 };
2108 
2109 static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
2110                                    unsigned size)
2111 {
2112     PnvXive2 *xive = PNV_XIVE2(opaque);
2113 
2114     xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
2115     return -1;
2116 }
2117 
2118 static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
2119                                 uint64_t val, unsigned size)
2120 {
2121     PnvXive2 *xive = PNV_XIVE2(opaque);
2122 
2123     xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
2124 }
2125 
2126 static const MemoryRegionOps pnv_xive2_nvc_ops = {
2127     .read = pnv_xive2_nvc_read,
2128     .write = pnv_xive2_nvc_write,
2129     .endianness = DEVICE_BIG_ENDIAN,
2130     .valid = {
2131         .min_access_size = 8,
2132         .max_access_size = 8,
2133     },
2134     .impl = {
2135         .min_access_size = 8,
2136         .max_access_size = 8,
2137     },
2138 };
2139 
2140 static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
2141                                     unsigned size)
2142 {
2143     PnvXive2 *xive = PNV_XIVE2(opaque);
2144 
2145     xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
2146     return -1;
2147 }
2148 
2149 static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
2150                                  uint64_t val, unsigned size)
2151 {
2152     PnvXive2 *xive = PNV_XIVE2(opaque);
2153 
2154     xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
2155 }
2156 
2157 static const MemoryRegionOps pnv_xive2_nvpg_ops = {
2158     .read = pnv_xive2_nvpg_read,
2159     .write = pnv_xive2_nvpg_write,
2160     .endianness = DEVICE_BIG_ENDIAN,
2161     .valid = {
2162         .min_access_size = 8,
2163         .max_access_size = 8,
2164     },
2165     .impl = {
2166         .min_access_size = 8,
2167         .max_access_size = 8,
2168     },
2169 };
2170 
2171 /*
2172  * POWER10 default capabilities: 0x2000120076f000FC
2173  */
2174 #define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC
2175 
2176 /*
2177  * POWER10 default configuration: 0x0030000033000000
2178  *
2179  * The 8-bit thread id field was dropped for P10
2180  */
2181 #define PNV_XIVE2_CONFIGURATION 0x0030000033000000
2182 
2183 static void pnv_xive2_reset(void *dev)
2184 {
2185     PnvXive2 *xive = PNV_XIVE2(dev);
2186     XiveSource *xsrc = &xive->ipi_source;
2187     Xive2EndSource *end_xsrc = &xive->end_source;
2188 
2189     xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
2190     xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;
2191 
2192     /* HW hardwires the chip's topology ID in the block field */
2193     xive->cq_regs[CQ_XIVE_CFG >> 3] |=
2194         SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);
2195 
2196     /* VC and PC cache watch assign mechanism */
2197     xive->vc_regs[VC_ENDC_CFG >> 3] =
2198         SETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN, 0ull, 0b0111);
2199     xive->pc_regs[PC_NXC_PROC_CONFIG >> 3] =
2200         SETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN, 0ull, 0b0111);
2201 
2202     /* Set default page size to 64k */
2203     xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
2204     xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;
2205 
2206     /* Clear source MMIOs */
2207     if (memory_region_is_mapped(&xsrc->esb_mmio)) {
2208         memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
2209     }
2210 
2211     if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
2212         memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
2213     }
2214 }
2215 
2216 /*
2217  *  Maximum number of IRQs and ENDs supported by HW. Will be tuned by
2218  *  software.
2219  */
2220 #define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
2221 #define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
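/*
 * Each source takes a pair of 64K ESB pages (1ull << XIVE_ESB_64K_2PAGE
 * == 128K), so the counts above are simply the ESB and END BAR sizes
 * divided by 128K.
 */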
2222 
2223 static void pnv_xive2_realize(DeviceState *dev, Error **errp)
2224 {
2225     PnvXive2 *xive = PNV_XIVE2(dev);
2226     PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
2227     XiveSource *xsrc = &xive->ipi_source;
2228     Xive2EndSource *end_xsrc = &xive->end_source;
2229     Error *local_err = NULL;
2230     int i;
2231 
2232     pxc->parent_realize(dev, &local_err);
2233     if (local_err) {
2234         error_propagate(errp, local_err);
2235         return;
2236     }
2237 
2238     assert(xive->chip);
2239 
2240     /*
2241      * The XiveSource and Xive2EndSource objects are realized with the
2242      * maximum allowed HW configuration. The ESB MMIO regions will be
2243      * resized dynamically when the controller is configured by the FW
2244      * to limit accesses to resources not provisioned.
2245      */
2246     object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
2247                             &error_fatal);
2248     object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
2249                             &error_fatal);
2250     object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
2251                              &error_fatal);
2252     qdev_realize(DEVICE(xsrc), NULL, &local_err);
2253     if (local_err) {
2254         error_propagate(errp, local_err);
2255         return;
2256     }
2257 
2258     object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
2259                             &error_fatal);
2260     object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
2261                              &error_abort);
2262     qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
2263     if (local_err) {
2264         error_propagate(errp, local_err);
2265         return;
2266     }
2267 
2268     /* XSCOM region, used for initial configuration of the BARs */
2269     memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
2270                           &pnv_xive2_xscom_ops, xive, "xscom-xive",
2271                           PNV10_XSCOM_XIVE2_SIZE << 3);
2272 
2273     /* Interrupt controller MMIO regions */
2274     xive->ic_shift = 16;
2275     memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
2276                        PNV10_XIVE2_IC_SIZE);
2277 
2278     for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
2279         memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
2280                          pnv_xive2_ic_regions[i].ops, xive,
2281                          pnv_xive2_ic_regions[i].name,
2282                          pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
2283     }
2284 
2285     /*
2286      * VC MMIO regions.
2287      */
2288     xive->esb_shift = 16;
2289     xive->end_shift = 16;
2290     memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
2291                        PNV10_XIVE2_ESB_SIZE);
2292     memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
2293                        PNV10_XIVE2_END_SIZE);
2294 
2295     /* Presenter Controller MMIO region (not modeled) */
2296     xive->nvc_shift = 16;
2297     xive->nvpg_shift = 16;
2298     memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
2299                           &pnv_xive2_nvc_ops, xive,
2300                           "xive-nvc", PNV10_XIVE2_NVC_SIZE);
2301 
2302     memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
2303                           &pnv_xive2_nvpg_ops, xive,
2304                           "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);
2305 
2306     /* Thread Interrupt Management Area (Direct) */
2307     xive->tm_shift = 16;
2308     memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
2309                           xive, "xive-tima", PNV10_XIVE2_TM_SIZE);
2310 
2311     qemu_register_reset(pnv_xive2_reset, dev);
2312 }
2313 
2314 static Property pnv_xive2_properties[] = {
2315     DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
2316     DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
2317     DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
2318     DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
2319     DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
2320     DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
2321     DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
2322                        PNV_XIVE2_CAPABILITIES),
2323     DEFINE_PROP_UINT64("config", PnvXive2, config,
2324                        PNV_XIVE2_CONFIGURATION),
2325     DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
2326     DEFINE_PROP_END_OF_LIST(),
2327 };
2328 
2329 static void pnv_xive2_instance_init(Object *obj)
2330 {
2331     PnvXive2 *xive = PNV_XIVE2(obj);
2332 
2333     object_initialize_child(obj, "ipi_source", &xive->ipi_source,
2334                             TYPE_XIVE_SOURCE);
2335     object_initialize_child(obj, "end_source", &xive->end_source,
2336                             TYPE_XIVE2_END_SOURCE);
2337 }
2338 
2339 static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
2340                               int xscom_offset)
2341 {
2342     const char compat_p10[] = "ibm,power10-xive-x";
2343     char *name;
2344     int offset;
2345     uint32_t reg[] = {
2346         cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
2347         cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
2348     };
2349 
2350     name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
2351     offset = fdt_add_subnode(fdt, xscom_offset, name);
2352     _FDT(offset);
2353     g_free(name);
2354 
2355     _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
2356     _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
2357                      sizeof(compat_p10)));
2358     return 0;
2359 }
2360 
2361 static void pnv_xive2_class_init(ObjectClass *klass, void *data)
2362 {
2363     DeviceClass *dc = DEVICE_CLASS(klass);
2364     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
2365     Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
2366     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
2367     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
2368     PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);
2369 
2370     xdc->dt_xscom  = pnv_xive2_dt_xscom;
2371 
2372     dc->desc       = "PowerNV XIVE2 Interrupt Controller (POWER10)";
2373     device_class_set_parent_realize(dc, pnv_xive2_realize,
2374                                     &pxc->parent_realize);
2375     device_class_set_props(dc, pnv_xive2_properties);
2376 
2377     xrc->get_eas   = pnv_xive2_get_eas;
2378     xrc->get_pq    = pnv_xive2_get_pq;
2379     xrc->set_pq    = pnv_xive2_set_pq;
2380     xrc->get_end   = pnv_xive2_get_end;
2381     xrc->write_end = pnv_xive2_write_end;
2382     xrc->get_nvp   = pnv_xive2_get_nvp;
2383     xrc->write_nvp = pnv_xive2_write_nvp;
2384     xrc->get_config  = pnv_xive2_get_config;
2385     xrc->get_block_id = pnv_xive2_get_block_id;
2386 
2387     xnc->notify    = pnv_xive2_notify;
2388 
2389     xpc->match_nvt  = pnv_xive2_match_nvt;
2390     xpc->get_config = pnv_xive2_presenter_get_config;
2391 }
2392 
2393 static const TypeInfo pnv_xive2_info = {
2394     .name          = TYPE_PNV_XIVE2,
2395     .parent        = TYPE_XIVE2_ROUTER,
2396     .instance_init = pnv_xive2_instance_init,
2397     .instance_size = sizeof(PnvXive2),
2398     .class_init    = pnv_xive2_class_init,
2399     .class_size    = sizeof(PnvXive2Class),
2400     .interfaces    = (InterfaceInfo[]) {
2401         { TYPE_PNV_XSCOM_INTERFACE },
2402         { }
2403     }
2404 };
2405 
2406 static void pnv_xive2_register_types(void)
2407 {
2408     type_register_static(&pnv_xive2_info);
2409 }
2410 
2411 type_init(pnv_xive2_register_types)
2412 
2413 static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
2414                                      GString *buf)
2415 {
2416     uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
2417     uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
2418 
2419     if (!xive2_nvp_is_valid(nvp)) {
2420         return;
2421     }
2422 
2423     g_string_append_printf(buf, "  %08x end:%02x/%04x IPB:%02x",
2424                            nvp_idx, eq_blk, eq_idx,
2425                            xive_get_field32(NVP2_W2_IPB, nvp->w2));
2426     /*
2427      * When the NVP is HW controlled, more fields are updated
2428      */
2429     if (xive2_nvp_is_hw(nvp)) {
2430         g_string_append_printf(buf, " CPPR:%02x",
2431                                xive_get_field32(NVP2_W2_CPPR, nvp->w2));
2432         if (xive2_nvp_is_co(nvp)) {
2433             g_string_append_printf(buf, " CO:%04x",
2434                                    xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
2435         }
2436     }
2437     g_string_append_c(buf, '\n');
2438 }
2439 
2440 /*
2441  * If the table is direct, we can compute the number of PQ entries
2442  * provisioned by FW.
2443  */
2444 static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
2445 {
2446     uint8_t blk = pnv_xive2_block_id(xive);
2447     uint64_t vsd = xive->vsds[VST_ESB][blk];
2448     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
2449 
2450     return (VSD_INDIRECT & vsd) ? 0 : vst_tsize * SBE_PER_BYTE;
2451 }
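/*
 * e.g. a direct ESB table with VSD_TSIZE = 4 spans 1 << (4 + 12) = 64K
 * of state bytes and therefore provisions 64K * SBE_PER_BYTE = 256K
 * PQ entries.
 */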
2452 
2453 /*
2454  * Compute the number of entries per indirect subpage.
2455  */
2456 static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
2457 {
2458     uint8_t blk = pnv_xive2_block_id(xive);
2459     uint64_t vsd = xive->vsds[type][blk];
2460     const XiveVstInfo *info = &vst_infos[type];
2461     uint64_t vsd_addr;
2462     uint32_t page_shift;
2463 
2464     /* For direct tables, fake a valid value */
2465     if (!(VSD_INDIRECT & vsd)) {
2466         return 1;
2467     }
2468 
2469     /* Get the page size of the indirect table. */
2470     vsd_addr = vsd & VSD_ADDRESS_MASK;
2471     ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
2472 
2473     if (!(vsd & VSD_ADDRESS_MASK)) {
2474 #ifdef XIVE2_DEBUG
2475         xive2_error(xive, "VST: invalid %s entry!?", info->name);
2476 #endif
2477         return 0;
2478     }
2479 
2480     page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
2481 
2482     if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
2483         xive2_error(xive, "VST: invalid %s page shift %d", info->name,
2484                    page_shift);
2485         return 0;
2486     }
2487 
2488     return (1ull << page_shift) / info->size;
2489 }
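/*
 * e.g. an indirect NVP table backed by 64K subpages holds
 * 64K / sizeof(Xive2Nvp) = 2048 NVP entries per subpage.
 */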
2490 
2491 void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
2492 {
2493     Xive2Router *xrtr = XIVE2_ROUTER(xive);
2494     uint8_t blk = pnv_xive2_block_id(xive);
2495     uint8_t chip_id = xive->chip->chip_id;
2496     uint32_t srcno0 = XIVE_EAS(blk, 0);
2497     uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
2498     Xive2Eas eas;
2499     Xive2End end;
2500     Xive2Nvp nvp;
2501     int i;
2502     uint64_t xive_nvp_per_subpage;
2503 
2504     g_string_append_printf(buf, "XIVE[%x] Source %08x .. %08x\n",
2505                            blk, srcno0, srcno0 + nr_esbs - 1);
2506     xive_source_pic_print_info(&xive->ipi_source, srcno0, buf);
2507 
2508     g_string_append_printf(buf, "XIVE[%x] EAT %08x .. %08x\n",
2509                            blk, srcno0, srcno0 + nr_esbs - 1);
2510     for (i = 0; i < nr_esbs; i++) {
2511         if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
2512             break;
2513         }
2514         if (!xive2_eas_is_masked(&eas)) {
2515             xive2_eas_pic_print_info(&eas, i, buf);
2516         }
2517     }
2518 
2519     g_string_append_printf(buf, "XIVE[%x] #%d END Escalation EAT\n",
2520                            chip_id, blk);
2521     i = 0;
2522     while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2523         xive2_end_eas_pic_print_info(&end, i++, buf);
2524     }
2525 
2526     g_string_append_printf(buf, "XIVE[%x] #%d ENDT\n", chip_id, blk);
2527     i = 0;
2528     while (!xive2_router_get_end(xrtr, blk, i, &end)) {
2529         xive2_end_pic_print_info(&end, i++, buf);
2530     }
2531 
2532     g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n",
2533                            chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
2534     xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
2535     for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
2536         while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
2537             xive2_nvp_pic_print_info(&nvp, i++, buf);
2538         }
2539     }
2540 }
2541