/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "trace.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4
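
/*
 * Each SBE entry is presumably a 2-bit ESB PQ state, i.e. 4 sources
 * per byte of backing store. A direct 64K SBE page would then cover
 * 64K * SBE_PER_BYTE = 256K interrupt numbers, which is how
 * pnv_xive_nr_ipis() sizes the IPI range below.
 */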

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled):
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8
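
/*
 * A rough sketch of the indirect lookup performed below (assuming
 * 64K indirect pages and 32-byte END entries, as in this model):
 *
 *   vst_per_page = 0x10000 / sizeof(XiveEND);    // 2048 entries
 *   vsd_idx      = idx / vst_per_page;           // idx 5000 -> VSD #2
 *   vsd          = ldq_be(vsd_table + vsd_idx * XIVE_VSD_SIZE);
 *   entry_addr   = (vsd & VSD_ADDRESS_MASK)
 *                  + (idx % vst_per_page) * sizeof(XiveEND);
 */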

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
           page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64,
                   info->name, idx, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                       MEMTXATTRS_UNSPECIFIED)) {
            xive_error(xive, "VST: failed to access %s entry %x @0x%"
                       PRIx64, info->name, vsd_idx, vsd_addr);
            return 0;
        }

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

/*
 * This is a simplified model of operation forwarding on a remote IC.
 *
 * A PC MMIO address is built to identify the NVT structure. The load
 * on the remote IC will return the address of the structure in RAM,
 * which will then be used by pnv_xive_vst_write/read to perform the
 * RAM operation.
 */
static uint64_t pnv_xive_vst_addr_remote(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint8_t blk,
                                         uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t remote_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_addr;
    MemTxResult result;

    if (type != VST_TSEL_VPDT) {
        xive_error(xive, "VST: invalid access on remote VST %s %x/%x !?",
                   info->name, blk, idx);
        return 0;
    }

    /* Widen before shifting: a 32-bit idx << 16 can overflow */
    remote_addr |= (uint64_t)idx << xive->pc_shift;

    vst_addr = address_space_ldq_be(&address_space_memory, remote_addr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VST: read failed at @0x%"  HWADDR_PRIx
                   " for NVT %x/%x", remote_addr, blk, idx);
        return 0;
    }

    return vst_addr;
}

static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        return pnv_xive_vst_addr_remote(xive, type, vsd, blk, idx);
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    result = address_space_read(&address_space_memory, addr,
                                MEMTXATTRS_UNSPECIFIED, data,
                                info->size);
    if (result != MEMTX_OK) {
        xive_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        result = address_space_write(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED, data,
                                     info->size);
    } else {
        result = address_space_write(&address_space_memory,
                                     addr + word_number * 4,
                                     MEMTXATTRS_UNSPECIFIED,
                                     data + word_number * 4, 4);
    }

    if (result != MEMTX_OK) {
        xive_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}
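
/*
 * Callers may update a single 4-byte word of an entry instead of the
 * whole structure; the generic router code uses this, for instance,
 * to update just word 1 of an END after advancing the event queue:
 *
 *   pnv_xive_write_end(xrtr, blk, idx, &end, 1);
 *
 * which stores only bytes [4..7] of the entry back to RAM.
 */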

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: END %x/%x is remote !?", blk, idx);
        return -1;
    }

    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: END %x/%x is remote !?", blk, idx);
        return -1;
    }

    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}
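
/*
 * The cache watch sequence, as modeled here, driven by firmware:
 *
 *   1. write VC_EQC_CWATCH_SPEC with the END block/index to watch;
 *   2. read  VC_EQC_CWATCH_DAT0, which loads DAT0-DAT3 from the VST
 *      (pnv_xive_end_cache_load) and snapshots the entry;
 *   3. modify the DAT registers as needed;
 *   4. write VC_EQC_CWATCH_DAT0, which flushes DAT0-DAT3 back to RAM
 *      (pnv_xive_end_update).
 *
 * The PC_VPC_CWATCH registers below follow the same pattern for NVTs.
 */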

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * cores 0-15 of the chip (normal mode) or cores 0-7 (fused mode). The
 * second register covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}
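
/*
 * Note that PPC_BIT() uses big-endian (IBM) bit numbering: bit 0 is
 * the MSB. The low 6 bits of the PIR thus select one of the 64 thread
 * enable bits of a register, MSB first, e.g. pir 0x01 maps to
 * 0x8000000000000000ull >> 1.
 */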

static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and carry on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint32_t pnv_xive_presenter_get_config(XivePresenter *xptr)
{
    uint32_t cfg = 0;

    /* TIMA GEN1 is all P9 knows */
    cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;

    return cfg;
}

static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips. To identify the chip
 * from which the access is being done, we use the XIVE presenter that
 * the thread's interrupt context was registered with.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive *xive = PNV_XIVE(xptr);

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}
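
/*
 * Sketch of the encoding, assuming the usual 4-bit block / 28-bit
 * index split of XIVE_EAS():
 *
 *   srcno 0x42 on chip/block 2  ->  XIVE_EAS(2, 0x42) = 0x20000042
 *
 * XIVE_EAS_BLOCK() and XIVE_EAS_INDEX() recover the two halves on the
 * receiving side (see pnv_xive_ic_hw_trigger below).
 */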

/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
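
/*
 * The BARM registers hold a size mask; the window size is its two's
 * complement. A worked example, assuming a 64GB VC window:
 *
 *   regs[CQ_VC_BARM >> 3] = 0xFFFFFFF000000000
 *   vc_size   = ~0xFFFFFFF000000000 + 1 = 0x1000000000 (64GB)
 *   edt_shift = ctz64(vc_size / XIVE_TABLE_EDT_MAX), i.e. the EDT
 *   splits the window into XIVE_TABLE_EDT_MAX equal sets.
 */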

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64,
                   info->name, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        /* No entry index is available in this context */
        xive_error(xive, "VST: invalid %s entry !?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */
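
/*
 * For instance, with four sets of size S laid out as
 *
 *   set:   0     1     2     3
 *   type: IPI | EQ  | IPI | EQ
 *
 * the IPI address space sees sets 0 and 2 back to back (offsets 0 and
 * S), and the END address space sees sets 1 and 3 (offsets 0 and S).
 * pnv_xive_edt_offset() below performs exactly this collapse.
 */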

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}
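
/*
 * A plausible firmware programming sequence for the EDT, matching the
 * handler above:
 *
 *   stq(CQ_TAR, CQ_TAR_TBL_AUTOINC | CQ_TAR_TSEL_EDT);  // index 0
 *   for (i = 0; i < EDT_ENTRIES; i++) {
 *       stq(CQ_TDR, edt_entry[i]);   // auto-increments CQ_TAR index
 *   }
 *
 * Once the last entry lands, tsel_index reaches ARRAY_SIZE(xive->edt)
 * and the ESB windows are resized.
 */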

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured this way, as each
 * uses the Virtual Structure Tables: SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */
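
/*
 * With the IC BAR programmed for 64K pages (ic_shift = 16), the
 * sub-regions mapped by the CQ_IC_BAR handler below land at:
 *
 *   registers       ic_base + (0 << 16)
 *   notify page     ic_base + (1 << 16)
 *   LSI pages       ic_base + (2 << 16)
 *   indirect TIMA   ic_base + (4 << 16)
 */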

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has removed
         * support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit 4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */
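
/*
 * Each offset above is one 128-byte cache line in the second half of
 * the notify page. A store to, say, notify_base + PNV_XIVE_SYNC_IPI
 * acts as an ordering barrier for in-flight IPI notifications; in
 * this model the syncs complete immediately, so the write handler
 * below simply accepts and discards them.
 */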

static void pnv_xive_end_notify(XiveRouter *xrtr, XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);
    uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    uint32_t end_data = xive_get_field64(EAS_END_DATA, eas->w);
    uint64_t end_vsd = xive->vsds[VST_TSEL_EQDT][end_blk];

    switch (GETFIELD(VSD_MODE, end_vsd)) {
    case VSD_MODE_EXCLUSIVE:
        /* Perform the END notification on the local IC. */
        xive_router_end_notify(xrtr, eas);
        break;

    case VSD_MODE_FORWARD: {
        MemTxResult result;
        uint64_t notif_port = end_vsd & VSD_ADDRESS_MASK;
        uint64_t data = XIVE_TRIGGER_END | XIVE_TRIGGER_PQ |
            be64_to_cpu(eas->w);

        /* Forward the store on the remote IC notify page. */
        address_space_stq_be(&address_space_memory, notif_port, data,
                             MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            xive_error(xive, "IC: Forward notif END %x/%x [%x] failed @%"
                       HWADDR_PRIx, end_blk, end_idx, end_data, notif_port);
            return;
        }
        break;
    }

    case VSD_MODE_INVALID:
    default:
        /* Set FIR */
        xive_error(xive, "IC: Invalid END VSD for block %x", end_blk);
        return;
    }
}

/*
 * The notify page can either be used to receive trigger events from
 * the HW controllers (PHB, PSI) or to reroute interrupts between
 * Interrupt controllers.
 */
static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    trace_pnv_xive_ic_hw_trigger(addr, val);

    if (val & XIVE_TRIGGER_END) {
        val = cpu_to_be64(val);
        pnv_xive_end_notify(XIVE_ROUTER(xive), (XiveEAS *) &val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                       !!(val & XIVE_TRIGGER_PQ));
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is used for resets and also for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}
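
/*
 * A plausible firmware sequence for an indirect TIMA access, matching
 * the handler above:
 *
 *   stq(PC_TCTXT_INDIR0,
 *       SETFIELD(PC_TCTXT_INDIR_THRDID, PC_TCTXT_INDIR_VALID, thread_id));
 *   ... load/store in pages 4-7 of the IC BAR (tm_indirect_mmio) ...
 *   stq(PC_TCTXT_INDIR0, 0);   // close the indirect window
 */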

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};
1697 
1698 /*
1699  * Virtualization Controller MMIO region containing the IPI and END ESB pages
1700  */
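/*
 * The VC space is split in equal-sized segments whose type, IPI or
 * EQ, is set by FW in the EDT table. A hedged worked example,
 * assuming a 512GB VC BAR and 64 EDT entries (8GB segments): with
 * EDT[0..1] of IPI type and EDT[2] of EQ type, a load at VC offset
 * (2 * 8GB) + 0x10 selects segment 2, hence the END ESBs, and is
 * remapped to offset 0x10 of the "xive-vc-end" address space, since
 * no earlier segment is of EQ type.
 */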
1701 static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
1702                                  unsigned size)
1703 {
1704     PnvXive *xive = PNV_XIVE(opaque);
1705     uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1706     uint64_t edt_type = 0;
1707     uint64_t edt_offset;
1708     MemTxResult result;
1709     AddressSpace *edt_as = NULL;
1710     uint64_t ret = -1;
1711 
1712     if (edt_index < XIVE_TABLE_EDT_MAX) {
1713         edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1714     }
1715 
1716     switch (edt_type) {
1717     case CQ_TDR_EDT_IPI:
1718         edt_as = &xive->ipi_as;
1719         break;
1720     case CQ_TDR_EDT_EQ:
1721         edt_as = &xive->end_as;
1722         break;
1723     default:
1724         xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
1725         return -1;
1726     }
1727 
1728     /* Remap the offset for the targeted address space */
1729     edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1730 
1731     ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
1732                             &result);
1733 
1734     if (result != MEMTX_OK) {
1735         xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
1736                    HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
1737                    offset, edt_offset);
1738         return -1;
1739     }
1740 
1741     return ret;
1742 }
1743 
1744 static void pnv_xive_vc_write(void *opaque, hwaddr offset,
1745                               uint64_t val, unsigned size)
1746 {
1747     PnvXive *xive = PNV_XIVE(opaque);
1748     uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1749     uint64_t edt_type = 0;
1750     uint64_t edt_offset;
1751     MemTxResult result;
1752     AddressSpace *edt_as = NULL;
1753 
1754     if (edt_index < XIVE_TABLE_EDT_MAX) {
1755         edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1756     }
1757 
1758     switch (edt_type) {
1759     case CQ_TDR_EDT_IPI:
1760         edt_as = &xive->ipi_as;
1761         break;
1762     case CQ_TDR_EDT_EQ:
1763         edt_as = &xive->end_as;
1764         break;
1765     default:
1766         xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
1767                    offset);
1768         return;
1769     }
1770 
1771     /* Remap the offset for the targeted address space */
1772     edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1773 
1774     address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
1775     if (result != MEMTX_OK) {
1776         xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
1777     }
1778 }
1779 
1780 static const MemoryRegionOps pnv_xive_vc_ops = {
1781     .read = pnv_xive_vc_read,
1782     .write = pnv_xive_vc_write,
1783     .endianness = DEVICE_BIG_ENDIAN,
1784     .valid = {
1785         .min_access_size = 8,
1786         .max_access_size = 8,
1787     },
1788     .impl = {
1789         .min_access_size = 8,
1790         .max_access_size = 8,
1791     },
1792 };
1793 
1794 /*
1795  * Presenter Controller MMIO region. Points to the NVT sets.
1796  *
1797  * HW implements all possible mem ops to the underlying NVT structure
1798  * but QEMU does not need to be so precise. The model implementation
1799  * simply returns the RAM address of the NVT structure which is then
1800  * used by pnv_xive_vst_write/read to perform the RAM operation.
1801  */
1802 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr offset, unsigned size)
1803 {
1804     PnvXive *xive = PNV_XIVE(opaque);
1805     uint32_t nvt_idx = offset >> xive->pc_shift;
1806     uint8_t blk = pnv_xive_block_id(xive); /* TODO: VDT -> block xlate */
1807 
1808     return pnv_xive_vst_addr(xive, VST_TSEL_VPDT, blk, nvt_idx);
1809 }
1810 
1811 static void pnv_xive_pc_write(void *opaque, hwaddr addr,
1812                               uint64_t value, unsigned size)
1813 {
1814     PnvXive *xive = PNV_XIVE(opaque);
1815 
1816     xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
1817 }
1818 
1819 static const MemoryRegionOps pnv_xive_pc_ops = {
1820     .read = pnv_xive_pc_read,
1821     .write = pnv_xive_pc_write,
1822     .endianness = DEVICE_BIG_ENDIAN,
1823     .valid = {
1824         .min_access_size = 8,
1825         .max_access_size = 8,
1826     },
1827     .impl = {
1828         .min_access_size = 8,
1829         .max_access_size = 8,
1830     },
1831 };
1832 
1833 static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
1834                                     Monitor *mon)
1835 {
1836     uint8_t  eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
1837     uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);
1838 
1839     if (!xive_nvt_is_valid(nvt)) {
1840         return;
1841     }
1842 
1843     monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
1844                    eq_blk, eq_idx,
1845                    xive_get_field32(NVT_W4_IPB, nvt->w4));
1846 }
1847 
1848 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
1849 {
1850     XiveRouter *xrtr = XIVE_ROUTER(xive);
1851     uint8_t blk = pnv_xive_block_id(xive);
1852     uint8_t chip_id = xive->chip->chip_id;
1853     uint32_t srcno0 = XIVE_EAS(blk, 0);
1854     uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
1855     XiveEAS eas;
1856     XiveEND end;
1857     XiveNVT nvt;
1858     int i;
1859     uint64_t xive_nvt_per_subpage;
1860 
1861     monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
1862                    srcno0, srcno0 + nr_ipis - 1);
1863     xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);
1864 
1865     monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
1866                    srcno0, srcno0 + nr_ipis - 1);
1867     for (i = 0; i < nr_ipis; i++) {
1868         if (xive_router_get_eas(xrtr, blk, i, &eas)) {
1869             break;
1870         }
1871         if (!xive_eas_is_masked(&eas)) {
1872             xive_eas_pic_print_info(&eas, i, mon);
1873         }
1874     }
1875 
1876     monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
1877     i = 0;
1878     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1879         xive_end_pic_print_info(&end, i++, mon);
1880     }
1881 
1882     monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
1883     i = 0;
1884     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1885         xive_end_eas_pic_print_info(&end, i++, mon);
1886     }
1887 
1888     monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
1889                    0, XIVE_NVT_COUNT - 1);
1890     xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
1891     for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
1892         while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
1893             xive_nvt_pic_print_info(&nvt, i++, mon);
1894         }
1895     }
1896 }
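
/*
 * Hypothetical excerpt of the resulting output, with made-up values
 * for illustration:
 *
 *   XIVE[0] #0 Source 00000000 .. 00000fff
 *   XIVE[0] #0 EAT 00000000 .. 00000fff
 *   XIVE[0] #0 ENDT
 *   XIVE[0] #0 END Escalation EAT
 *   XIVE[0] #0 NVTT 00000000 .. 0007ffff
 *     00000010 end:00/0004 IPB:80
 */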
1897 
1898 static void pnv_xive_reset(void *dev)
1899 {
1900     PnvXive *xive = PNV_XIVE(dev);
1901     XiveSource *xsrc = &xive->ipi_source;
1902     XiveENDSource *end_xsrc = &xive->end_source;
1903 
1904     /* Default page size (Should be changed at runtime to 64k) */
1905     xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1906 
1907     /* Clear subregions */
1908     if (memory_region_is_mapped(&xsrc->esb_mmio)) {
1909         memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
1910     }
1911 
1912     if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
1913         memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
1914     }
1915 
1916     if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
1917         memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
1918     }
1919 
1920     if (memory_region_is_mapped(&xive->end_edt_mmio)) {
1921         memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
1922     }
1923 }
1924 
1925 static void pnv_xive_init(Object *obj)
1926 {
1927     PnvXive *xive = PNV_XIVE(obj);
1928 
1929     object_initialize_child(obj, "ipi_source", &xive->ipi_source,
1930                             TYPE_XIVE_SOURCE);
1931     object_initialize_child(obj, "end_source", &xive->end_source,
1932                             TYPE_XIVE_END_SOURCE);
1933 }
1934 
1935 /*
1936  * Maximum number of IRQs and ENDs supported by HW
1937  */
1938 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1939 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
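
/*
 * Worked example, assuming the usual P9 sizing: a 512GB VC BAR
 * (PNV9_XIVE_VC_SIZE) divided by the two 64K ESB pages of each
 * interrupt (1ull << XIVE_ESB_64K_2PAGE = 128K) yields 4M possible
 * IRQs, and likewise 4M ENDs.
 */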
1940 
1941 static void pnv_xive_realize(DeviceState *dev, Error **errp)
1942 {
1943     PnvXive *xive = PNV_XIVE(dev);
1944     PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
1945     XiveSource *xsrc = &xive->ipi_source;
1946     XiveENDSource *end_xsrc = &xive->end_source;
1947     Error *local_err = NULL;
1948 
1949     pxc->parent_realize(dev, &local_err);
1950     if (local_err) {
1951         error_propagate(errp, local_err);
1952         return;
1953     }
1954 
1955     assert(xive->chip);
1956 
1957     /*
1958      * The XiveSource and XiveENDSource objects are realized with the
1959      * maximum allowed HW configuration. The ESB MMIO regions will be
1960      * resized dynamically when the controller is configured by the FW,
1961      * limiting accesses to resources that are not provisioned.
1962      */
1963     object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
1964                             &error_fatal);
1965     object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
1966     if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
1967         return;
1968     }
1969 
1970     object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
1971                             &error_fatal);
1972     object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
1973                              &error_abort);
1974     if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
1975         return;
1976     }
1977 
1978     /* Default page size. Generally changed at runtime to 64k */
1979     xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1980 
1981     /* XSCOM region, used for initial configuration of the BARs */
1982     memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
1983                           xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);
1984 
1985     /* Interrupt controller MMIO regions */
1986     memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
1987                        PNV9_XIVE_IC_SIZE);
1988 
1989     memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
1990                           xive, "xive-ic-reg", 1 << xive->ic_shift);
1991     memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
1992                           &pnv_xive_ic_notify_ops,
1993                           xive, "xive-ic-notify", 1 << xive->ic_shift);
1994     xive->ic_notify_mmio.disable_reentrancy_guard = true;
1995 
1996     /* The Pervasive LSI trigger and EOI pages (not modeled) */
1997     memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
1998                           xive, "xive-ic-lsi", 2 << xive->ic_shift);
1999 
2000     /* Thread Interrupt Management Area (Indirect) */
2001     memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
2002                           &xive_tm_indirect_ops,
2003                           xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
2004     /*
2005      * Overall Virtualization Controller MMIO region containing the
2006      * IPI ESB pages and END ESB pages. The layout is defined by the
2007      * EDT "Domain table" and the accesses are dispatched using
2008      * a dedicated address space for each page type.
2009      */
2010     memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
2011                           "xive-vc", PNV9_XIVE_VC_SIZE);
2012 
2013     memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
2014                        PNV9_XIVE_VC_SIZE);
2015     address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
2016     memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
2017                        PNV9_XIVE_VC_SIZE);
2018     address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");
2019 
2020     /*
2021      * The MMIO windows exposing the IPI ESBs and the END ESBs in the
2022      * VC region. Their size is configured by the FW in the EDT table.
2023      */
2024     memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
2025     memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);
2026 
2027     /* Presenter Controller MMIO region (not modeled) */
2028     memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
2029                           "xive-pc", PNV9_XIVE_PC_SIZE);
2030     xive->pc_mmio.disable_reentrancy_guard = true;
2031 
2032     /* Thread Interrupt Management Area (Direct) */
2033     memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
2034                           xive, "xive-tima", PNV9_XIVE_TM_SIZE);
2035 
2036     qemu_register_reset(pnv_xive_reset, dev);
2037 }
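
/*
 * Note that none of the MMIO regions built above is mapped at
 * realize time: FW programs the BARs and the EDT through the IC
 * register space later on, which is when these regions are inserted
 * into the machine address space.
 */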
2038 
2039 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
2040                              int xscom_offset)
2041 {
2042     const char compat[] = "ibm,power9-xive-x";
2043     char *name;
2044     int offset;
2045     uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
2046     uint32_t reg[] = {
2047         cpu_to_be32(lpc_pcba),
2048         cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
2049     };
2050 
2051     name = g_strdup_printf("xive@%x", lpc_pcba);
2052     offset = fdt_add_subnode(fdt, xscom_offset, name);
2053     _FDT(offset);
2054     g_free(name);
2055 
2056     _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
2057     _FDT((fdt_setprop(fdt, offset, "compatible", compat,
2058                       sizeof(compat))));
2059     return 0;
2060 }
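
/*
 * Assuming the usual P9 XSCOM values (base 0x5013000, size 0x300),
 * the generated node would look like:
 *
 *   xive@5013000 {
 *       reg = <0x5013000 0x300>;
 *       compatible = "ibm,power9-xive-x";
 *   };
 */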
2061 
2062 static Property pnv_xive_properties[] = {
2063     DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
2064     DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
2065     DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
2066     DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
2067     /* The PnvChip id identifies the XIVE interrupt controller. */
2068     DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
2069     DEFINE_PROP_END_OF_LIST(),
2070 };
2071 
2072 static void pnv_xive_class_init(ObjectClass *klass, void *data)
2073 {
2074     DeviceClass *dc = DEVICE_CLASS(klass);
2075     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
2076     XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
2077     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
2078     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
2079     PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);
2080 
2081     xdc->dt_xscom = pnv_xive_dt_xscom;
2082 
2083     dc->desc = "PowerNV XIVE Interrupt Controller";
2084     device_class_set_parent_realize(dc, pnv_xive_realize, &pxc->parent_realize);
2086     device_class_set_props(dc, pnv_xive_properties);
2087 
2088     xrc->get_eas = pnv_xive_get_eas;
2089     xrc->get_pq = pnv_xive_get_pq;
2090     xrc->set_pq = pnv_xive_set_pq;
2091     xrc->get_end = pnv_xive_get_end;
2092     xrc->write_end = pnv_xive_write_end;
2093     xrc->get_nvt = pnv_xive_get_nvt;
2094     xrc->write_nvt = pnv_xive_write_nvt;
2095     xrc->get_block_id = pnv_xive_get_block_id;
2096     xrc->end_notify = pnv_xive_end_notify;
2097 
2098     xnc->notify = pnv_xive_notify;
2099     xpc->match_nvt  = pnv_xive_match_nvt;
2100     xpc->get_config = pnv_xive_presenter_get_config;
2101 }
2102 
2103 static const TypeInfo pnv_xive_info = {
2104     .name          = TYPE_PNV_XIVE,
2105     .parent        = TYPE_XIVE_ROUTER,
2106     .instance_init = pnv_xive_init,
2107     .instance_size = sizeof(PnvXive),
2108     .class_init    = pnv_xive_class_init,
2109     .class_size    = sizeof(PnvXiveClass),
2110     .interfaces    = (InterfaceInfo[]) {
2111         { TYPE_PNV_XSCOM_INTERFACE },
2112         { }
2113     }
2114 };
2115 
2116 static void pnv_xive_register_types(void)
2117 {
2118     type_register_static(&pnv_xive_info);
2119 }
2120 
2121 type_init(pnv_xive_register_types)
2122