xref: /openbmc/qemu/hw/intc/pnv_xive.c (revision f2c1e591fa3adb964337daa85be1f86cd7a20a0a)
1 /*
2  * QEMU PowerPC XIVE interrupt controller model
3  *
4  * Copyright (c) 2017-2019, IBM Corporation.
5  *
6  * This code is licensed under the GPL version 2 or later. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qemu/module.h"
13 #include "qapi/error.h"
14 #include "target/ppc/cpu.h"
15 #include "sysemu/cpus.h"
16 #include "sysemu/dma.h"
17 #include "sysemu/reset.h"
18 #include "monitor/monitor.h"
19 #include "hw/ppc/fdt.h"
20 #include "hw/ppc/pnv.h"
21 #include "hw/ppc/pnv_chip.h"
22 #include "hw/ppc/pnv_core.h"
23 #include "hw/ppc/pnv_xscom.h"
24 #include "hw/ppc/pnv_xive.h"
25 #include "hw/ppc/xive_regs.h"
26 #include "hw/qdev-properties.h"
27 #include "hw/ppc/ppc.h"
28 #include "trace.h"
29 
30 #include <libfdt.h>
31 
32 #include "pnv_xive_regs.h"
33 
34 #undef XIVE_DEBUG
35 
36 /*
37  * Virtual structures table (VST)
38  */
39 #define SBE_PER_BYTE   4
40 
41 typedef struct XiveVstInfo {
42     const char *name;
43     uint32_t    size;
44     uint32_t    max_blocks;
45 } XiveVstInfo;
46 
47 static const XiveVstInfo vst_infos[] = {
48     [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
49     [VST_TSEL_SBE]  = { "SBE",  1,               16 },
50     [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
51     [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },
52 
53     /*
54      *  Interrupt fifo backing store table (not modeled) :
55      *
56      * 0 - IPI,
57      * 1 - HWD,
58      * 2 - First escalate,
59      * 3 - Second escalate,
60      * 4 - Redistribution,
61      * 5 - IPI cascaded queue ?
62      */
63     [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
64 };
65 
66 #define xive_error(xive, fmt, ...)                                      \
67     qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
68                   (xive)->chip->chip_id, ## __VA_ARGS__);
69 
70 /*
71  * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
72  * field overrides the hardwired chip ID in the Powerbus operations
73  * and for CAM compares
74  */
75 static uint8_t pnv_xive_block_id(PnvXive *xive)
76 {
77     uint8_t blk = xive->chip->chip_id;
78     uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];
79 
80     if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
81         blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
82     }
83 
84     return blk;
85 }
86 
87 /*
88  * Remote access to controllers. HW uses MMIOs. For now, a simple scan
89  * of the chips is good enough.
90  *
91  * TODO: Block scope support
92  */
93 static PnvXive *pnv_xive_get_remote(uint8_t blk)
94 {
95     PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
96     int i;
97 
98     for (i = 0; i < pnv->num_chips; i++) {
99         Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
100         PnvXive *xive = &chip9->xive;
101 
102         if (pnv_xive_block_id(xive) == blk) {
103             return xive;
104         }
105     }
106     return NULL;
107 }
108 
109 /*
110  * VST accessors for SBE, EAT, ENDT, NVT
111  *
112  * Indirect VST tables are arrays of VSDs pointing to a page (of same
113  * size). Each page is a direct VST table.
114  */
115 
116 #define XIVE_VSD_SIZE 8
117 
118 /* Indirect page size can be 4K, 64K, 2M, 16M. */
119 static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
120 {
121      return page_shift == 12 || page_shift == 16 ||
122          page_shift == 21 || page_shift == 24;
123 }
124 
125 static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
126                                          uint64_t vsd, uint32_t idx)
127 {
128     const XiveVstInfo *info = &vst_infos[type];
129     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
130     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
131     uint32_t idx_max;
132 
133     idx_max = vst_tsize / info->size - 1;
134     if (idx > idx_max) {
135 #ifdef XIVE_DEBUG
136         xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
137                    info->name, idx, idx_max);
138 #endif
139         return 0;
140     }
141 
142     return vst_addr + idx * info->size;
143 }
144 
145 static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
146                                            uint64_t vsd, uint32_t idx)
147 {
148     const XiveVstInfo *info = &vst_infos[type];
149     uint64_t vsd_addr;
150     uint32_t vsd_idx;
151     uint32_t page_shift;
152     uint32_t vst_per_page;
153 
154     /* Get the page size of the indirect table. */
155     vsd_addr = vsd & VSD_ADDRESS_MASK;
156     if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
157                     MEMTXATTRS_UNSPECIFIED)) {
158         xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64,
159                    info->name, idx, vsd_addr);
160         return 0;
161     }
162 
163     if (!(vsd & VSD_ADDRESS_MASK)) {
164 #ifdef XIVE_DEBUG
165         xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
166 #endif
167         return 0;
168     }
169 
170     page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
171 
172     if (!pnv_xive_vst_page_size_allowed(page_shift)) {
173         xive_error(xive, "VST: invalid %s page shift %d", info->name,
174                    page_shift);
175         return 0;
176     }
177 
178     vst_per_page = (1ull << page_shift) / info->size;
179     vsd_idx = idx / vst_per_page;
180 
181     /* Load the VSD we are looking for, if not already done */
182     if (vsd_idx) {
183         vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
184         if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
185                        MEMTXATTRS_UNSPECIFIED)) {
186             xive_error(xive, "VST: failed to access %s entry %x @0x%"
187                        PRIx64, info->name, vsd_idx, vsd_addr);
188             return 0;
189         }
190 
191         if (!(vsd & VSD_ADDRESS_MASK)) {
192 #ifdef XIVE_DEBUG
193             xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
194 #endif
195             return 0;
196         }
197 
198         /*
199          * Check that the pages have a consistent size across the
200          * indirect table
201          */
202         if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
203             xive_error(xive, "VST: %s entry %x indirect page size differ !?",
204                        info->name, idx);
205             return 0;
206         }
207     }
208 
209     return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
210 }
211 
212 static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
213                                   uint32_t idx)
214 {
215     const XiveVstInfo *info = &vst_infos[type];
216     uint64_t vsd;
217 
218     if (blk >= info->max_blocks) {
219         xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
220                    blk, info->name, idx);
221         return 0;
222     }
223 
224     vsd = xive->vsds[type][blk];
225 
226     /* Remote VST access */
227     if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
228         if (type != VST_TSEL_VPDT) {
229             xive_error(xive, "VST: invalid access on remote VST %s %x/%x !?",
230                        info->name, blk, idx);
231             return 0;
232         }
233         xive = pnv_xive_get_remote(blk);
234 
235         return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
236     }
237 
238     if (VSD_INDIRECT & vsd) {
239         return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
240     }
241 
242     return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
243 }
244 
245 static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
246                              uint32_t idx, void *data)
247 {
248     const XiveVstInfo *info = &vst_infos[type];
249     uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
250     MemTxResult result;
251 
252     if (!addr) {
253         return -1;
254     }
255 
256     result = address_space_read(&address_space_memory, addr,
257                                 MEMTXATTRS_UNSPECIFIED, data,
258                                 info->size);
259     if (result != MEMTX_OK) {
260         xive_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
261                    " for VST %s %x/%x\n", addr, info->name, blk, idx);
262         return -1;
263     }
264     return 0;
265 }
266 
267 #define XIVE_VST_WORD_ALL -1
268 
269 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
270                               uint32_t idx, void *data, uint32_t word_number)
271 {
272     const XiveVstInfo *info = &vst_infos[type];
273     uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
274     MemTxResult result;
275 
276     if (!addr) {
277         return -1;
278     }
279 
280     if (word_number == XIVE_VST_WORD_ALL) {
281         result = address_space_write(&address_space_memory, addr,
282                                      MEMTXATTRS_UNSPECIFIED, data,
283                                      info->size);
284     } else {
285         result = address_space_write(&address_space_memory,
286                                      addr + word_number * 4,
287                                      MEMTXATTRS_UNSPECIFIED,
288                                      data + word_number * 4, 4);
289     }
290 
291     if (result != MEMTX_OK) {
292         xive_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
293                     "for VST %s %x/%x\n", addr, info->name, blk, idx);
294         return -1;
295     }
296     return 0;
297 }
298 
299 static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
300                             XiveEND *end)
301 {
302     PnvXive *xive = PNV_XIVE(xrtr);
303 
304     if (pnv_xive_block_id(xive) != blk) {
305         xive_error(xive, "VST: END %x/%x is remote !?", blk, idx);
306         return -1;
307     }
308 
309     return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
310 }
311 
312 static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
313                               XiveEND *end, uint8_t word_number)
314 {
315     PnvXive *xive = PNV_XIVE(xrtr);
316 
317     if (pnv_xive_block_id(xive) != blk) {
318         xive_error(xive, "VST: END %x/%x is remote !?", blk, idx);
319         return -1;
320     }
321 
322     return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
323                               word_number);
324 }
325 
326 static int pnv_xive_end_update(PnvXive *xive)
327 {
328     uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
329                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
330     uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
331                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
332     int i;
333     uint64_t eqc_watch[4];
334 
335     for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
336         eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
337     }
338 
339     return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
340                               XIVE_VST_WORD_ALL);
341 }
342 
343 static void pnv_xive_end_cache_load(PnvXive *xive)
344 {
345     uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
346                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
347     uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
348                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
349     uint64_t eqc_watch[4] = { 0 };
350     int i;
351 
352     if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
353         xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
354     }
355 
356     for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
357         xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
358     }
359 }
360 
361 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
362                             XiveNVT *nvt)
363 {
364     return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
365 }
366 
367 static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
368                               XiveNVT *nvt, uint8_t word_number)
369 {
370     return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
371                               word_number);
372 }
373 
374 static int pnv_xive_nvt_update(PnvXive *xive)
375 {
376     uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
377                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
378     uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
379                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
380     int i;
381     uint64_t vpc_watch[8];
382 
383     for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
384         vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
385     }
386 
387     return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
388                               XIVE_VST_WORD_ALL);
389 }
390 
391 static void pnv_xive_nvt_cache_load(PnvXive *xive)
392 {
393     uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
394                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
395     uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
396                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
397     uint64_t vpc_watch[8] = { 0 };
398     int i;
399 
400     if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
401         xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
402     }
403 
404     for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
405         xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
406     }
407 }
408 
409 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
410                             XiveEAS *eas)
411 {
412     PnvXive *xive = PNV_XIVE(xrtr);
413 
414     /*
415      * EAT lookups should be local to the IC
416      */
417     if (pnv_xive_block_id(xive) != blk) {
418         xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
419         return -1;
420     }
421 
422     return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
423 }
424 
425 static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
426                            uint8_t *pq)
427 {
428     PnvXive *xive = PNV_XIVE(xrtr);
429 
430     if (pnv_xive_block_id(xive) != blk) {
431         xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
432         return -1;
433     }
434 
435     *pq = xive_source_esb_get(&xive->ipi_source, idx);
436     return 0;
437 }
438 
439 static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
440                            uint8_t *pq)
441 {
442     PnvXive *xive = PNV_XIVE(xrtr);
443 
444     if (pnv_xive_block_id(xive) != blk) {
445         xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
446         return -1;
447     }
448 
449     *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
450     return 0;
451 }
452 
453 /*
454  * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
455  * the first cores 0-15 (normal) of the chip or 0-7 (fused). The
456  * second register covers cores 16-23 (normal) or 8-11 (fused).
457  */
458 static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
459 {
460     int pir = ppc_cpu_pir(cpu);
461     uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
462     uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
463     uint32_t bit = pir & 0x3f;
464 
465     return xive->regs[reg >> 3] & PPC_BIT(bit);
466 }
467 
468 static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
469                               uint8_t nvt_blk, uint32_t nvt_idx,
470                               bool cam_ignore, uint8_t priority,
471                               uint32_t logic_serv, XiveTCTXMatch *match)
472 {
473     PnvXive *xive = PNV_XIVE(xptr);
474     PnvChip *chip = xive->chip;
475     int count = 0;
476     int i, j;
477 
478     for (i = 0; i < chip->nr_cores; i++) {
479         PnvCore *pc = chip->cores[i];
480         CPUCore *cc = CPU_CORE(pc);
481 
482         for (j = 0; j < cc->nr_threads; j++) {
483             PowerPCCPU *cpu = pc->threads[j];
484             XiveTCTX *tctx;
485             int ring;
486 
487             if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
488                 continue;
489             }
490 
491             tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
492 
493             /*
494              * Check the thread context CAM lines and record matches.
495              */
496             ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
497                                              nvt_idx, cam_ignore, logic_serv);
498             /*
499              * Save the context and follow on to catch duplicates, that we
500              * don't support yet.
501              */
502             if (ring != -1) {
503                 if (match->tctx) {
504                     qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
505                                   "thread context NVT %x/%x\n",
506                                   nvt_blk, nvt_idx);
507                     return -1;
508                 }
509 
510                 match->ring = ring;
511                 match->tctx = tctx;
512                 count++;
513             }
514         }
515     }
516 
517     return count;
518 }
519 
520 static uint32_t pnv_xive_presenter_get_config(XivePresenter *xptr)
521 {
522     uint32_t cfg = 0;
523 
524     /* TIMA GEN1 is all P9 knows */
525     cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;
526 
527     return cfg;
528 }
529 
530 static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
531 {
532     return pnv_xive_block_id(PNV_XIVE(xrtr));
533 }
534 
535 /*
536  * The TIMA MMIO space is shared among the chips and to identify the
537  * chip from which the access is being done, we extract the chip id
538  * from the PIR.
539  */
540 static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
541 {
542     int pir = ppc_cpu_pir(cpu);
543     XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
544     PnvXive *xive = PNV_XIVE(xptr);
545 
546     if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
547         xive_error(xive, "IC: CPU %x is not enabled", pir);
548     }
549     return xive;
550 }
551 
552 /*
553  * The internal sources (IPIs) of the interrupt controller have no
554  * knowledge of the XIVE chip on which they reside. Encode the block
555  * id in the source interrupt number before forwarding the source
556  * event notification to the Router. This is required on a multichip
557  * system.
558  */
559 static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
560 {
561     PnvXive *xive = PNV_XIVE(xn);
562     uint8_t blk = pnv_xive_block_id(xive);
563 
564     xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
565 }
566 
567 /*
568  * XIVE helpers
569  */
570 
571 static uint64_t pnv_xive_vc_size(PnvXive *xive)
572 {
573     return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
574 }
575 
576 static uint64_t pnv_xive_edt_shift(PnvXive *xive)
577 {
578     return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
579 }
580 
581 static uint64_t pnv_xive_pc_size(PnvXive *xive)
582 {
583     return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
584 }
585 
586 static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
587 {
588     uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
589     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
590 
591     return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
592 }
593 
594 /*
595  * Compute the number of entries per indirect subpage.
596  */
597 static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
598 {
599     uint8_t blk = pnv_xive_block_id(xive);
600     uint64_t vsd = xive->vsds[type][blk];
601     const XiveVstInfo *info = &vst_infos[type];
602     uint64_t vsd_addr;
603     uint32_t page_shift;
604 
605     /* For direct tables, fake a valid value */
606     if (!(VSD_INDIRECT & vsd)) {
607         return 1;
608     }
609 
610     /* Get the page size of the indirect table. */
611     vsd_addr = vsd & VSD_ADDRESS_MASK;
612     if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
613                    MEMTXATTRS_UNSPECIFIED)) {
614         xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64,
615                    info->name, vsd_addr);
616         return 0;
617     }
618 
619     if (!(vsd & VSD_ADDRESS_MASK)) {
620 #ifdef XIVE_DEBUG
621         xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
622 #endif
623         return 0;
624     }
625 
626     page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
627 
628     if (!pnv_xive_vst_page_size_allowed(page_shift)) {
629         xive_error(xive, "VST: invalid %s page shift %d", info->name,
630                    page_shift);
631         return 0;
632     }
633 
634     return (1ull << page_shift) / info->size;
635 }
636 
637 /*
638  * EDT Table
639  *
640  * The Virtualization Controller MMIO region containing the IPI ESB
641  * pages and END ESB pages is sub-divided into "sets" which map
642  * portions of the VC region to the different ESB pages. It is
643  * configured at runtime through the EDT "Domain Table" to let the
644  * firmware decide how to split the VC address space between IPI ESB
645  * pages and END ESB pages.
646  */
647 
648 /*
649  * Computes the overall size of the IPI or the END ESB pages
650  */
651 static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
652 {
653     uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
654     uint64_t size = 0;
655     int i;
656 
657     for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
658         uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
659 
660         if (edt_type == type) {
661             size += edt_size;
662         }
663     }
664 
665     return size;
666 }
667 
668 /*
669  * Maps an offset of the VC region in the IPI or END region using the
670  * layout defined by the EDT "Domaine Table"
671  */
672 static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
673                                               uint64_t type)
674 {
675     int i;
676     uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
677     uint64_t edt_offset = vc_offset;
678 
679     for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
680         uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
681 
682         if (edt_type != type) {
683             edt_offset -= edt_size;
684         }
685     }
686 
687     return edt_offset;
688 }
689 
690 static void pnv_xive_edt_resize(PnvXive *xive)
691 {
692     uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
693     uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);
694 
695     memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
696     memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);
697 
698     memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
699     memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
700 }
701 
702 /*
703  * XIVE Table configuration. Only EDT is supported.
704  */
705 static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
706 {
707     uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
708     uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
709     uint64_t *xive_table;
710     uint8_t max_index;
711 
712     switch (tsel) {
713     case CQ_TAR_TSEL_BLK:
714         max_index = ARRAY_SIZE(xive->blk);
715         xive_table = xive->blk;
716         break;
717     case CQ_TAR_TSEL_MIG:
718         max_index = ARRAY_SIZE(xive->mig);
719         xive_table = xive->mig;
720         break;
721     case CQ_TAR_TSEL_EDT:
722         max_index = ARRAY_SIZE(xive->edt);
723         xive_table = xive->edt;
724         break;
725     case CQ_TAR_TSEL_VDT:
726         max_index = ARRAY_SIZE(xive->vdt);
727         xive_table = xive->vdt;
728         break;
729     default:
730         xive_error(xive, "IC: invalid table %d", (int) tsel);
731         return -1;
732     }
733 
734     if (tsel_index >= max_index) {
735         xive_error(xive, "IC: invalid index %d", (int) tsel_index);
736         return -1;
737     }
738 
739     xive_table[tsel_index] = val;
740 
741     if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
742         xive->regs[CQ_TAR >> 3] =
743             SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
744     }
745 
746     /*
747      * EDT configuration is complete. Resize the MMIO windows exposing
748      * the IPI and the END ESBs in the VC region.
749      */
750     if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
751         pnv_xive_edt_resize(xive);
752     }
753 
754     return 0;
755 }
756 
757 /*
758  * Virtual Structure Tables (VST) configuration
759  */
760 static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
761                                        uint8_t blk, uint64_t vsd)
762 {
763     XiveENDSource *end_xsrc = &xive->end_source;
764     XiveSource *xsrc = &xive->ipi_source;
765     const XiveVstInfo *info = &vst_infos[type];
766     uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
767     uint64_t vst_tsize = 1ull << page_shift;
768     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
769 
770     /* Basic checks */
771 
772     if (VSD_INDIRECT & vsd) {
773         if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
774             xive_error(xive, "VST: %s indirect tables are not enabled",
775                        info->name);
776             return;
777         }
778 
779         if (!pnv_xive_vst_page_size_allowed(page_shift)) {
780             xive_error(xive, "VST: invalid %s page shift %d", info->name,
781                        page_shift);
782             return;
783         }
784     }
785 
786     if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
787         xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
788                    " page shift %d", info->name, vst_addr, page_shift);
789         return;
790     }
791 
792     /* Record the table configuration (in SRAM on HW) */
793     xive->vsds[type][blk] = vsd;
794 
795     /* Now tune the models with the configuration provided by the FW */
796 
797     switch (type) {
798     case VST_TSEL_IVT:  /* Nothing to be done */
799         break;
800 
801     case VST_TSEL_EQDT:
802         /*
803          * Backing store pages for the END.
804          *
805          * If the table is direct, we can compute the number of PQ
806          * entries provisioned by FW (such as skiboot) and resize the
807          * END ESB window accordingly.
808          */
809         if (!(VSD_INDIRECT & vsd)) {
810             memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
811                                    * (1ull << xsrc->esb_shift));
812         }
813         memory_region_add_subregion(&xive->end_edt_mmio, 0,
814                                     &end_xsrc->esb_mmio);
815         break;
816 
817     case VST_TSEL_SBE:
818         /*
819          * Backing store pages for the source PQ bits. The model does
820          * not use these PQ bits backed in RAM because the XiveSource
821          * model has its own.
822          *
823          * If the table is direct, we can compute the number of PQ
824          * entries provisioned by FW (such as skiboot) and resize the
825          * ESB window accordingly.
826          */
827         if (!(VSD_INDIRECT & vsd)) {
828             memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
829                                    * (1ull << xsrc->esb_shift));
830         }
831         memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
832         break;
833 
834     case VST_TSEL_VPDT: /* Not modeled */
835     case VST_TSEL_IRQ:  /* Not modeled */
836         /*
837          * These tables contains the backing store pages for the
838          * interrupt fifos of the VC sub-engine in case of overflow.
839          */
840         break;
841 
842     default:
843         g_assert_not_reached();
844     }
845 }
846 
847 /*
848  * Both PC and VC sub-engines are configured as each use the Virtual
849  * Structure Tables : SBE, EAS, END and NVT.
850  */
851 static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
852 {
853     uint8_t mode = GETFIELD(VSD_MODE, vsd);
854     uint8_t type = GETFIELD(VST_TABLE_SELECT,
855                             xive->regs[VC_VSD_TABLE_ADDR >> 3]);
856     uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
857                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
858     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
859 
860     if (type > VST_TSEL_IRQ) {
861         xive_error(xive, "VST: invalid table type %d", type);
862         return;
863     }
864 
865     if (blk >= vst_infos[type].max_blocks) {
866         xive_error(xive, "VST: invalid block id %d for"
867                       " %s table", blk, vst_infos[type].name);
868         return;
869     }
870 
871     /*
872      * Only take the VC sub-engine configuration into account because
873      * the XiveRouter model combines both VC and PC sub-engines
874      */
875     if (pc_engine) {
876         return;
877     }
878 
879     if (!vst_addr) {
880         xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
881         return;
882     }
883 
884     switch (mode) {
885     case VSD_MODE_FORWARD:
886         xive->vsds[type][blk] = vsd;
887         break;
888 
889     case VSD_MODE_EXCLUSIVE:
890         pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
891         break;
892 
893     default:
894         xive_error(xive, "VST: unsupported table mode %d", mode);
895         return;
896     }
897 }
898 
899 /*
900  * Interrupt controller MMIO region. The layout is compatible between
901  * 4K and 64K pages :
902  *
903  * Page 0           sub-engine BARs
904  *  0x000 - 0x3FF   IC registers
905  *  0x400 - 0x7FF   PC registers
906  *  0x800 - 0xFFF   VC registers
907  *
908  * Page 1           Notify page (writes only)
909  *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
910  *  0x800 - 0xFFF   forwards and syncs
911  *
912  * Page 2           LSI Trigger page (writes only) (not modeled)
913  * Page 3           LSI SB EOI page (reads only) (not modeled)
914  *
915  * Page 4-7         indirect TIMA
916  */
917 
918 /*
919  * IC - registers MMIO
920  */
921 static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
922                                   uint64_t val, unsigned size)
923 {
924     PnvXive *xive = PNV_XIVE(opaque);
925     MemoryRegion *sysmem = get_system_memory();
926     uint32_t reg = offset >> 3;
927     bool is_chip0 = xive->chip->chip_id == 0;
928 
929     switch (offset) {
930 
931     /*
932      * XIVE CQ (PowerBus bridge) settings
933      */
934     case CQ_MSGSND:     /* msgsnd for doorbells */
935     case CQ_FIRMASK_OR: /* FIR error reporting */
936         break;
937     case CQ_PBI_CTL:
938         if (val & CQ_PBI_PC_64K) {
939             xive->pc_shift = 16;
940         }
941         if (val & CQ_PBI_VC_64K) {
942             xive->vc_shift = 16;
943         }
944         break;
945     case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
946         /*
947          * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
948          */
949         break;
950 
951     /*
952      * XIVE Virtualization Controller settings
953      */
954     case VC_GLOBAL_CONFIG:
955         break;
956 
957     /*
958      * XIVE Presenter Controller settings
959      */
960     case PC_GLOBAL_CONFIG:
961         /*
962          * PC_GCONF_CHIPID_OVR
963          *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
964          */
965         break;
966     case PC_TCTXT_CFG:
967         /*
968          * TODO: block group support
969          */
970         break;
971     case PC_TCTXT_TRACK:
972         /*
973          * PC_TCTXT_TRACK_EN:
974          *   enable block tracking and exchange of block ownership
975          *   information between Interrupt controllers
976          */
977         break;
978 
979     /*
980      * Misc settings
981      */
982     case VC_SBC_CONFIG: /* Store EOI configuration */
983         /*
984          * Configure store EOI if required by firwmare (skiboot has removed
985          * support recently though)
986          */
987         if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
988             xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
989         }
990         break;
991 
992     case VC_EQC_CONFIG: /* TODO: silent escalation */
993     case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
994         break;
995 
996     /*
997      * XIVE BAR settings (XSCOM only)
998      */
999     case CQ_RST_CTL:
1000         /* bit4: resets all BAR registers */
1001         break;
1002 
1003     case CQ_IC_BAR: /* IC BAR. 8 pages */
1004         xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
1005         if (!(val & CQ_IC_BAR_VALID)) {
1006             xive->ic_base = 0;
1007             if (xive->regs[reg] & CQ_IC_BAR_VALID) {
1008                 memory_region_del_subregion(&xive->ic_mmio,
1009                                             &xive->ic_reg_mmio);
1010                 memory_region_del_subregion(&xive->ic_mmio,
1011                                             &xive->ic_notify_mmio);
1012                 memory_region_del_subregion(&xive->ic_mmio,
1013                                             &xive->ic_lsi_mmio);
1014                 memory_region_del_subregion(&xive->ic_mmio,
1015                                             &xive->tm_indirect_mmio);
1016 
1017                 memory_region_del_subregion(sysmem, &xive->ic_mmio);
1018             }
1019         } else {
1020             xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
1021             if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
1022                 memory_region_add_subregion(sysmem, xive->ic_base,
1023                                             &xive->ic_mmio);
1024 
1025                 memory_region_add_subregion(&xive->ic_mmio,  0,
1026                                             &xive->ic_reg_mmio);
1027                 memory_region_add_subregion(&xive->ic_mmio,
1028                                             1ul << xive->ic_shift,
1029                                             &xive->ic_notify_mmio);
1030                 memory_region_add_subregion(&xive->ic_mmio,
1031                                             2ul << xive->ic_shift,
1032                                             &xive->ic_lsi_mmio);
1033                 memory_region_add_subregion(&xive->ic_mmio,
1034                                             4ull << xive->ic_shift,
1035                                             &xive->tm_indirect_mmio);
1036             }
1037         }
1038         break;
1039 
1040     case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
1041     case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
1042         xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
1043         if (!(val & CQ_TM_BAR_VALID)) {
1044             xive->tm_base = 0;
1045             if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
1046                 memory_region_del_subregion(sysmem, &xive->tm_mmio);
1047             }
1048         } else {
1049             xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
1050             if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
1051                 memory_region_add_subregion(sysmem, xive->tm_base,
1052                                             &xive->tm_mmio);
1053             }
1054         }
1055         break;
1056 
1057     case CQ_PC_BARM:
1058         xive->regs[reg] = val;
1059         memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
1060         break;
1061     case CQ_PC_BAR: /* From 32M to 512G */
1062         if (!(val & CQ_PC_BAR_VALID)) {
1063             xive->pc_base = 0;
1064             if (xive->regs[reg] & CQ_PC_BAR_VALID) {
1065                 memory_region_del_subregion(sysmem, &xive->pc_mmio);
1066             }
1067         } else {
1068             xive->pc_base = val & ~(CQ_PC_BAR_VALID);
1069             if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
1070                 memory_region_add_subregion(sysmem, xive->pc_base,
1071                                             &xive->pc_mmio);
1072             }
1073         }
1074         break;
1075 
1076     case CQ_VC_BARM:
1077         xive->regs[reg] = val;
1078         memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
1079         break;
1080     case CQ_VC_BAR: /* From 64M to 4TB */
1081         if (!(val & CQ_VC_BAR_VALID)) {
1082             xive->vc_base = 0;
1083             if (xive->regs[reg] & CQ_VC_BAR_VALID) {
1084                 memory_region_del_subregion(sysmem, &xive->vc_mmio);
1085             }
1086         } else {
1087             xive->vc_base = val & ~(CQ_VC_BAR_VALID);
1088             if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
1089                 memory_region_add_subregion(sysmem, xive->vc_base,
1090                                             &xive->vc_mmio);
1091             }
1092         }
1093         break;
1094 
1095     /*
1096      * XIVE Table settings.
1097      */
1098     case CQ_TAR: /* Table Address */
1099         break;
1100     case CQ_TDR: /* Table Data */
1101         pnv_xive_table_set_data(xive, val);
1102         break;
1103 
1104     /*
1105      * XIVE VC & PC Virtual Structure Table settings
1106      */
1107     case VC_VSD_TABLE_ADDR:
1108     case PC_VSD_TABLE_ADDR: /* Virtual table selector */
1109         break;
1110     case VC_VSD_TABLE_DATA: /* Virtual table setting */
1111     case PC_VSD_TABLE_DATA:
1112         pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
1113         break;
1114 
1115     /*
1116      * Interrupt fifo overflow in memory backing store (Not modeled)
1117      */
1118     case VC_IRQ_CONFIG_IPI:
1119     case VC_IRQ_CONFIG_HW:
1120     case VC_IRQ_CONFIG_CASCADE1:
1121     case VC_IRQ_CONFIG_CASCADE2:
1122     case VC_IRQ_CONFIG_REDIST:
1123     case VC_IRQ_CONFIG_IPI_CASC:
1124         break;
1125 
1126     /*
1127      * XIVE hardware thread enablement
1128      */
1129     case PC_THREAD_EN_REG0: /* Physical Thread Enable */
1130     case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
1131         break;
1132 
1133     case PC_THREAD_EN_REG0_SET:
1134         xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
1135         break;
1136     case PC_THREAD_EN_REG1_SET:
1137         xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
1138         break;
1139     case PC_THREAD_EN_REG0_CLR:
1140         xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
1141         break;
1142     case PC_THREAD_EN_REG1_CLR:
1143         xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
1144         break;
1145 
1146     /*
1147      * Indirect TIMA access set up. Defines the PIR of the HW thread
1148      * to use.
1149      */
1150     case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
1151         break;
1152 
1153     /*
1154      * XIVE PC & VC cache updates for EAS, NVT and END
1155      */
1156     case VC_IVC_SCRUB_MASK:
1157     case VC_IVC_SCRUB_TRIG:
1158         break;
1159 
1160     case VC_EQC_CWATCH_SPEC:
1161         val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
1162         break;
1163     case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
1164         break;
1165     case VC_EQC_CWATCH_DAT0:
1166         /* writing to DATA0 triggers the cache write */
1167         xive->regs[reg] = val;
1168         pnv_xive_end_update(xive);
1169         break;
1170     case VC_EQC_SCRUB_MASK:
1171     case VC_EQC_SCRUB_TRIG:
1172         /*
1173          * The scrubbing registers flush the cache in RAM and can also
1174          * invalidate.
1175          */
1176         break;
1177 
1178     case PC_VPC_CWATCH_SPEC:
1179         val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
1180         break;
1181     case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
1182         break;
1183     case PC_VPC_CWATCH_DAT0:
1184         /* writing to DATA0 triggers the cache write */
1185         xive->regs[reg] = val;
1186         pnv_xive_nvt_update(xive);
1187         break;
1188     case PC_VPC_SCRUB_MASK:
1189     case PC_VPC_SCRUB_TRIG:
1190         /*
1191          * The scrubbing registers flush the cache in RAM and can also
1192          * invalidate.
1193          */
1194         break;
1195 
1196 
1197     /*
1198      * XIVE PC & VC cache invalidation
1199      */
1200     case PC_AT_KILL:
1201         break;
1202     case VC_AT_MACRO_KILL:
1203         break;
1204     case PC_AT_KILL_MASK:
1205     case VC_AT_MACRO_KILL_MASK:
1206         break;
1207 
1208     default:
1209         xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
1210         return;
1211     }
1212 
1213     xive->regs[reg] = val;
1214 }
1215 
1216 static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
1217 {
1218     PnvXive *xive = PNV_XIVE(opaque);
1219     uint64_t val = 0;
1220     uint32_t reg = offset >> 3;
1221 
1222     switch (offset) {
1223     case CQ_CFG_PB_GEN:
1224     case CQ_IC_BAR:
1225     case CQ_TM1_BAR:
1226     case CQ_TM2_BAR:
1227     case CQ_PC_BAR:
1228     case CQ_PC_BARM:
1229     case CQ_VC_BAR:
1230     case CQ_VC_BARM:
1231     case CQ_TAR:
1232     case CQ_TDR:
1233     case CQ_PBI_CTL:
1234 
1235     case PC_TCTXT_CFG:
1236     case PC_TCTXT_TRACK:
1237     case PC_TCTXT_INDIR0:
1238     case PC_TCTXT_INDIR1:
1239     case PC_TCTXT_INDIR2:
1240     case PC_TCTXT_INDIR3:
1241     case PC_GLOBAL_CONFIG:
1242 
1243     case PC_VPC_SCRUB_MASK:
1244 
1245     case VC_GLOBAL_CONFIG:
1246     case VC_AIB_TX_ORDER_TAG2:
1247 
1248     case VC_IRQ_CONFIG_IPI:
1249     case VC_IRQ_CONFIG_HW:
1250     case VC_IRQ_CONFIG_CASCADE1:
1251     case VC_IRQ_CONFIG_CASCADE2:
1252     case VC_IRQ_CONFIG_REDIST:
1253     case VC_IRQ_CONFIG_IPI_CASC:
1254 
1255     case VC_EQC_SCRUB_MASK:
1256     case VC_IVC_SCRUB_MASK:
1257     case VC_SBC_CONFIG:
1258     case VC_AT_MACRO_KILL_MASK:
1259     case VC_VSD_TABLE_ADDR:
1260     case PC_VSD_TABLE_ADDR:
1261     case VC_VSD_TABLE_DATA:
1262     case PC_VSD_TABLE_DATA:
1263     case PC_THREAD_EN_REG0:
1264     case PC_THREAD_EN_REG1:
1265         val = xive->regs[reg];
1266         break;
1267 
1268     /*
1269      * XIVE hardware thread enablement
1270      */
1271     case PC_THREAD_EN_REG0_SET:
1272     case PC_THREAD_EN_REG0_CLR:
1273         val = xive->regs[PC_THREAD_EN_REG0 >> 3];
1274         break;
1275     case PC_THREAD_EN_REG1_SET:
1276     case PC_THREAD_EN_REG1_CLR:
1277         val = xive->regs[PC_THREAD_EN_REG1 >> 3];
1278         break;
1279 
1280     case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
1281         val = 0xffffff0000000000;
1282         break;
1283 
1284     /*
1285      * XIVE PC & VC cache updates for EAS, NVT and END
1286      */
1287     case VC_EQC_CWATCH_SPEC:
1288         xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
1289         val = xive->regs[reg];
1290         break;
1291     case VC_EQC_CWATCH_DAT0:
1292         /*
1293          * Load DATA registers from cache with data requested by the
1294          * SPEC register
1295          */
1296         pnv_xive_end_cache_load(xive);
1297         val = xive->regs[reg];
1298         break;
1299     case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
1300         val = xive->regs[reg];
1301         break;
1302 
1303     case PC_VPC_CWATCH_SPEC:
1304         xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
1305         val = xive->regs[reg];
1306         break;
1307     case PC_VPC_CWATCH_DAT0:
1308         /*
1309          * Load DATA registers from cache with data requested by the
1310          * SPEC register
1311          */
1312         pnv_xive_nvt_cache_load(xive);
1313         val = xive->regs[reg];
1314         break;
1315     case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
1316         val = xive->regs[reg];
1317         break;
1318 
1319     case PC_VPC_SCRUB_TRIG:
1320     case VC_IVC_SCRUB_TRIG:
1321     case VC_EQC_SCRUB_TRIG:
1322         xive->regs[reg] &= ~VC_SCRUB_VALID;
1323         val = xive->regs[reg];
1324         break;
1325 
1326     /*
1327      * XIVE PC & VC cache invalidation
1328      */
1329     case PC_AT_KILL:
1330         xive->regs[reg] &= ~PC_AT_KILL_VALID;
1331         val = xive->regs[reg];
1332         break;
1333     case VC_AT_MACRO_KILL:
1334         xive->regs[reg] &= ~VC_KILL_VALID;
1335         val = xive->regs[reg];
1336         break;
1337 
1338     /*
1339      * XIVE synchronisation
1340      */
1341     case VC_EQC_CONFIG:
1342         val = VC_EQC_SYNC_MASK;
1343         break;
1344 
1345     default:
1346         xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
1347     }
1348 
1349     return val;
1350 }
1351 
1352 static const MemoryRegionOps pnv_xive_ic_reg_ops = {
1353     .read = pnv_xive_ic_reg_read,
1354     .write = pnv_xive_ic_reg_write,
1355     .endianness = DEVICE_BIG_ENDIAN,
1356     .valid = {
1357         .min_access_size = 8,
1358         .max_access_size = 8,
1359     },
1360     .impl = {
1361         .min_access_size = 8,
1362         .max_access_size = 8,
1363     },
1364 };
1365 
1366 /*
1367  * IC - Notify MMIO port page (write only)
1368  */
1369 #define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
1370 #define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
1371 #define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
1372 #define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
1373 #define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
1374 #define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
1375 #define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
1376 #define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */
1377 
1378 /* VC synchronisation */
1379 #define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
1380 #define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
1381 #define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
1382 #define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
1383 #define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */
1384 
1385 /* PC synchronisation */
1386 #define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
1387 #define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
1388 #define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */
1389 
1390 static void pnv_xive_end_notify(XiveRouter *xrtr, XiveEAS *eas)
1391 {
1392     PnvXive *xive = PNV_XIVE(xrtr);
1393     uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
1394     uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
1395     uint32_t end_data = xive_get_field64(EAS_END_DATA, eas->w);
1396     uint64_t end_vsd = xive->vsds[VST_TSEL_EQDT][end_blk];
1397 
1398     switch (GETFIELD(VSD_MODE, end_vsd)) {
1399     case VSD_MODE_EXCLUSIVE:
1400         /* Perform the END notification on the local IC. */
1401         xive_router_end_notify(xrtr, eas);
1402         break;
1403 
1404     case VSD_MODE_FORWARD: {
1405         MemTxResult result;
1406         uint64_t notif_port = end_vsd & VSD_ADDRESS_MASK;
1407         uint64_t data = XIVE_TRIGGER_END | XIVE_TRIGGER_PQ |
1408             be64_to_cpu(eas->w);
1409 
1410         /* Forward the store on the remote IC notify page. */
1411         address_space_stq_be(&address_space_memory, notif_port, data,
1412                              MEMTXATTRS_UNSPECIFIED, &result);
1413         if (result != MEMTX_OK) {
1414             xive_error(xive, "IC: Forward notif END %x/%x [%x] failed @%"
1415                        HWADDR_PRIx, end_blk, end_idx, end_data, notif_port);
1416             return;
1417         }
1418         break;
1419     }
1420 
1421     case VSD_MODE_INVALID:
1422     default:
1423         /* Set FIR */
1424         xive_error(xive, "IC: Invalid END VSD for block %x", end_blk);
1425         return;
1426     }
1427 }
1428 
1429 /*
1430  * The notify page can either be used to receive trigger events from
1431  * the HW controllers (PHB, PSI) or to reroute interrupts between
1432  * Interrupt controllers.
1433  */
1434 static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
1435 {
1436     uint8_t blk;
1437     uint32_t idx;
1438 
1439     trace_pnv_xive_ic_hw_trigger(addr, val);
1440 
1441     if (val & XIVE_TRIGGER_END) {
1442         val = cpu_to_be64(val);
1443         pnv_xive_end_notify(XIVE_ROUTER(xive), (XiveEAS *) &val);
1444         return;
1445     }
1446 
1447     /*
1448      * Forward the source event notification directly to the Router.
1449      * The source interrupt number should already be correctly encoded
1450      * with the chip block id by the sending device (PHB, PSI).
1451      */
1452     blk = XIVE_EAS_BLOCK(val);
1453     idx = XIVE_EAS_INDEX(val);
1454 
1455     xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
1456                        !!(val & XIVE_TRIGGER_PQ));
1457 }
1458 
1459 static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
1460                                      unsigned size)
1461 {
1462     PnvXive *xive = PNV_XIVE(opaque);
1463 
1464     /* VC: HW triggers */
1465     switch (addr) {
1466     case 0x000 ... 0x7FF:
1467         pnv_xive_ic_hw_trigger(opaque, addr, val);
1468         break;
1469 
1470     /* VC: Forwarded IRQs */
1471     case PNV_XIVE_FORWARD_IPI:
1472     case PNV_XIVE_FORWARD_HW:
1473     case PNV_XIVE_FORWARD_OS_ESC:
1474     case PNV_XIVE_FORWARD_HW_ESC:
1475     case PNV_XIVE_FORWARD_REDIS:
1476         /* TODO: forwarded IRQs. Should be like HW triggers */
1477         xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
1478                    addr, val);
1479         break;
1480 
1481     /* VC syncs */
1482     case PNV_XIVE_SYNC_IPI:
1483     case PNV_XIVE_SYNC_HW:
1484     case PNV_XIVE_SYNC_OS_ESC:
1485     case PNV_XIVE_SYNC_HW_ESC:
1486     case PNV_XIVE_SYNC_REDIS:
1487         break;
1488 
1489     /* PC syncs */
1490     case PNV_XIVE_SYNC_PULL:
1491     case PNV_XIVE_SYNC_PUSH:
1492     case PNV_XIVE_SYNC_VPC:
1493         break;
1494 
1495     default:
1496         xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
1497     }
1498 }
1499 
1500 static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
1501                                         unsigned size)
1502 {
1503     PnvXive *xive = PNV_XIVE(opaque);
1504 
1505     /* loads are invalid */
1506     xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
1507     return -1;
1508 }
1509 
1510 static const MemoryRegionOps pnv_xive_ic_notify_ops = {
1511     .read = pnv_xive_ic_notify_read,
1512     .write = pnv_xive_ic_notify_write,
1513     .endianness = DEVICE_BIG_ENDIAN,
1514     .valid = {
1515         .min_access_size = 8,
1516         .max_access_size = 8,
1517     },
1518     .impl = {
1519         .min_access_size = 8,
1520         .max_access_size = 8,
1521     },
1522 };
1523 
1524 /*
1525  * IC - LSI MMIO handlers (not modeled)
1526  */
1527 
1528 static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
1529                               uint64_t val, unsigned size)
1530 {
1531     PnvXive *xive = PNV_XIVE(opaque);
1532 
1533     xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
1534 }
1535 
1536 static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
1537 {
1538     PnvXive *xive = PNV_XIVE(opaque);
1539 
1540     xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
1541     return -1;
1542 }
1543 
1544 static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
1545     .read = pnv_xive_ic_lsi_read,
1546     .write = pnv_xive_ic_lsi_write,
1547     .endianness = DEVICE_BIG_ENDIAN,
1548     .valid = {
1549         .min_access_size = 8,
1550         .max_access_size = 8,
1551     },
1552     .impl = {
1553         .min_access_size = 8,
1554         .max_access_size = 8,
1555     },
1556 };
1557 
1558 /*
1559  * IC - Indirect TIMA MMIO handlers
1560  */
1561 
1562 /*
1563  * When the TIMA is accessed from the indirect page, the thread id of
1564  * the target CPU is configured in the PC_TCTXT_INDIR0 register before
1565  * use. This is used for resets and for debug purpose also.
1566  */
1567 static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
1568 {
1569     PnvChip *chip = xive->chip;
1570     uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
1571     PowerPCCPU *cpu = NULL;
1572     int pir;
1573 
1574     if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
1575         xive_error(xive, "IC: no indirect TIMA access in progress");
1576         return NULL;
1577     }
1578 
1579     pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
1580     cpu = pnv_chip_find_cpu(chip, pir);
1581     if (!cpu) {
1582         xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
1583         return NULL;
1584     }
1585 
1586     /* Check that HW thread is XIVE enabled */
1587     if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
1588         xive_error(xive, "IC: CPU %x is not enabled", pir);
1589     }
1590 
1591     return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1592 }
1593 
1594 static void xive_tm_indirect_write(void *opaque, hwaddr offset,
1595                                    uint64_t value, unsigned size)
1596 {
1597     XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1598 
1599     xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
1600 }
1601 
1602 static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
1603                                       unsigned size)
1604 {
1605     XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1606 
1607     return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
1608 }
1609 
1610 static const MemoryRegionOps xive_tm_indirect_ops = {
1611     .read = xive_tm_indirect_read,
1612     .write = xive_tm_indirect_write,
1613     .endianness = DEVICE_BIG_ENDIAN,
1614     .valid = {
1615         .min_access_size = 1,
1616         .max_access_size = 8,
1617     },
1618     .impl = {
1619         .min_access_size = 1,
1620         .max_access_size = 8,
1621     },
1622 };
1623 
1624 static void pnv_xive_tm_write(void *opaque, hwaddr offset,
1625                               uint64_t value, unsigned size)
1626 {
1627     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
1628     PnvXive *xive = pnv_xive_tm_get_xive(cpu);
1629     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1630 
1631     xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
1632 }
1633 
1634 static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
1635 {
1636     PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
1637     PnvXive *xive = pnv_xive_tm_get_xive(cpu);
1638     XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1639 
1640     return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
1641 }
1642 
1643 const MemoryRegionOps pnv_xive_tm_ops = {
1644     .read = pnv_xive_tm_read,
1645     .write = pnv_xive_tm_write,
1646     .endianness = DEVICE_BIG_ENDIAN,
1647     .valid = {
1648         .min_access_size = 1,
1649         .max_access_size = 8,
1650     },
1651     .impl = {
1652         .min_access_size = 1,
1653         .max_access_size = 8,
1654     },
1655 };
1656 
1657 /*
1658  * Interrupt controller XSCOM region.
1659  */
1660 static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
1661 {
1662     switch (addr >> 3) {
1663     case X_VC_EQC_CONFIG:
1664         /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
1665         return VC_EQC_SYNC_MASK;
1666     default:
1667         return pnv_xive_ic_reg_read(opaque, addr, size);
1668     }
1669 }
1670 
1671 static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
1672                                 uint64_t val, unsigned size)
1673 {
1674     pnv_xive_ic_reg_write(opaque, addr, val, size);
1675 }
1676 
1677 static const MemoryRegionOps pnv_xive_xscom_ops = {
1678     .read = pnv_xive_xscom_read,
1679     .write = pnv_xive_xscom_write,
1680     .endianness = DEVICE_BIG_ENDIAN,
1681     .valid = {
1682         .min_access_size = 8,
1683         .max_access_size = 8,
1684     },
1685     .impl = {
1686         .min_access_size = 8,
1687         .max_access_size = 8,
1688     }
1689 };
1690 
1691 /*
1692  * Virtualization Controller MMIO region containing the IPI and END ESB pages
1693  */
1694 static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
1695                                  unsigned size)
1696 {
1697     PnvXive *xive = PNV_XIVE(opaque);
1698     uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1699     uint64_t edt_type = 0;
1700     uint64_t edt_offset;
1701     MemTxResult result;
1702     AddressSpace *edt_as = NULL;
1703     uint64_t ret = -1;
1704 
1705     if (edt_index < XIVE_TABLE_EDT_MAX) {
1706         edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1707     }
1708 
1709     switch (edt_type) {
1710     case CQ_TDR_EDT_IPI:
1711         edt_as = &xive->ipi_as;
1712         break;
1713     case CQ_TDR_EDT_EQ:
1714         edt_as = &xive->end_as;
1715         break;
1716     default:
1717         xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
1718         return -1;
1719     }
1720 
1721     /* Remap the offset for the targeted address space */
1722     edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1723 
1724     ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
1725                             &result);
1726 
1727     if (result != MEMTX_OK) {
1728         xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
1729                    HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
1730                    offset, edt_offset);
1731         return -1;
1732     }
1733 
1734     return ret;
1735 }
1736 
1737 static void pnv_xive_vc_write(void *opaque, hwaddr offset,
1738                               uint64_t val, unsigned size)
1739 {
1740     PnvXive *xive = PNV_XIVE(opaque);
1741     uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1742     uint64_t edt_type = 0;
1743     uint64_t edt_offset;
1744     MemTxResult result;
1745     AddressSpace *edt_as = NULL;
1746 
1747     if (edt_index < XIVE_TABLE_EDT_MAX) {
1748         edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1749     }
1750 
1751     switch (edt_type) {
1752     case CQ_TDR_EDT_IPI:
1753         edt_as = &xive->ipi_as;
1754         break;
1755     case CQ_TDR_EDT_EQ:
1756         edt_as = &xive->end_as;
1757         break;
1758     default:
1759         xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
1760                    offset);
1761         return;
1762     }
1763 
1764     /* Remap the offset for the targeted address space */
1765     edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1766 
1767     address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
1768     if (result != MEMTX_OK) {
1769         xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
1770     }
1771 }
1772 
1773 static const MemoryRegionOps pnv_xive_vc_ops = {
1774     .read = pnv_xive_vc_read,
1775     .write = pnv_xive_vc_write,
1776     .endianness = DEVICE_BIG_ENDIAN,
1777     .valid = {
1778         .min_access_size = 8,
1779         .max_access_size = 8,
1780     },
1781     .impl = {
1782         .min_access_size = 8,
1783         .max_access_size = 8,
1784     },
1785 };
1786 
1787 /*
1788  * Presenter Controller MMIO region. The Virtualization Controller
1789  * updates the IPB in the NVT table when required. Not modeled.
1790  */
1791 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
1792                                  unsigned size)
1793 {
1794     PnvXive *xive = PNV_XIVE(opaque);
1795 
1796     xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
1797     return -1;
1798 }
1799 
1800 static void pnv_xive_pc_write(void *opaque, hwaddr addr,
1801                               uint64_t value, unsigned size)
1802 {
1803     PnvXive *xive = PNV_XIVE(opaque);
1804 
1805     xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
1806 }
1807 
1808 static const MemoryRegionOps pnv_xive_pc_ops = {
1809     .read = pnv_xive_pc_read,
1810     .write = pnv_xive_pc_write,
1811     .endianness = DEVICE_BIG_ENDIAN,
1812     .valid = {
1813         .min_access_size = 8,
1814         .max_access_size = 8,
1815     },
1816     .impl = {
1817         .min_access_size = 8,
1818         .max_access_size = 8,
1819     },
1820 };
1821 
1822 static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
1823                                     Monitor *mon)
1824 {
1825     uint8_t  eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
1826     uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);
1827 
1828     if (!xive_nvt_is_valid(nvt)) {
1829         return;
1830     }
1831 
1832     monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
1833                    eq_blk, eq_idx,
1834                    xive_get_field32(NVT_W4_IPB, nvt->w4));
1835 }
1836 
1837 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
1838 {
1839     XiveRouter *xrtr = XIVE_ROUTER(xive);
1840     uint8_t blk = pnv_xive_block_id(xive);
1841     uint8_t chip_id = xive->chip->chip_id;
1842     uint32_t srcno0 = XIVE_EAS(blk, 0);
1843     uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
1844     XiveEAS eas;
1845     XiveEND end;
1846     XiveNVT nvt;
1847     int i;
1848     uint64_t xive_nvt_per_subpage;
1849 
1850     monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
1851                    srcno0, srcno0 + nr_ipis - 1);
1852     xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);
1853 
1854     monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
1855                    srcno0, srcno0 + nr_ipis - 1);
1856     for (i = 0; i < nr_ipis; i++) {
1857         if (xive_router_get_eas(xrtr, blk, i, &eas)) {
1858             break;
1859         }
1860         if (!xive_eas_is_masked(&eas)) {
1861             xive_eas_pic_print_info(&eas, i, mon);
1862         }
1863     }
1864 
1865     monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
1866     i = 0;
1867     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1868         xive_end_pic_print_info(&end, i++, mon);
1869     }
1870 
1871     monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
1872     i = 0;
1873     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1874         xive_end_eas_pic_print_info(&end, i++, mon);
1875     }
1876 
1877     monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
1878                    0, XIVE_NVT_COUNT - 1);
1879     xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
1880     for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
1881         while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
1882             xive_nvt_pic_print_info(&nvt, i++, mon);
1883         }
1884     }
1885 }
1886 
1887 static void pnv_xive_reset(void *dev)
1888 {
1889     PnvXive *xive = PNV_XIVE(dev);
1890     XiveSource *xsrc = &xive->ipi_source;
1891     XiveENDSource *end_xsrc = &xive->end_source;
1892 
1893     /* Default page size (Should be changed at runtime to 64k) */
1894     xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1895 
1896     /* Clear subregions */
1897     if (memory_region_is_mapped(&xsrc->esb_mmio)) {
1898         memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
1899     }
1900 
1901     if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
1902         memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
1903     }
1904 
1905     if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
1906         memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
1907     }
1908 
1909     if (memory_region_is_mapped(&xive->end_edt_mmio)) {
1910         memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
1911     }
1912 }
1913 
1914 static void pnv_xive_init(Object *obj)
1915 {
1916     PnvXive *xive = PNV_XIVE(obj);
1917 
1918     object_initialize_child(obj, "ipi_source", &xive->ipi_source,
1919                             TYPE_XIVE_SOURCE);
1920     object_initialize_child(obj, "end_source", &xive->end_source,
1921                             TYPE_XIVE_END_SOURCE);
1922 }
1923 
1924 /*
1925  *  Maximum number of IRQs and ENDs supported by HW
1926  */
1927 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1928 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1929 
1930 static void pnv_xive_realize(DeviceState *dev, Error **errp)
1931 {
1932     PnvXive *xive = PNV_XIVE(dev);
1933     PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
1934     XiveSource *xsrc = &xive->ipi_source;
1935     XiveENDSource *end_xsrc = &xive->end_source;
1936     Error *local_err = NULL;
1937 
1938     pxc->parent_realize(dev, &local_err);
1939     if (local_err) {
1940         error_propagate(errp, local_err);
1941         return;
1942     }
1943 
1944     assert(xive->chip);
1945 
1946     /*
1947      * The XiveSource and XiveENDSource objects are realized with the
1948      * maximum allowed HW configuration. The ESB MMIO regions will be
1949      * resized dynamically when the controller is configured by the FW
1950      * to limit accesses to resources not provisioned.
1951      */
1952     object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
1953                             &error_fatal);
1954     object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
1955     if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
1956         return;
1957     }
1958 
1959     object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
1960                             &error_fatal);
1961     object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
1962                              &error_abort);
1963     if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
1964         return;
1965     }
1966 
1967     /* Default page size. Generally changed at runtime to 64k */
1968     xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1969 
1970     /* XSCOM region, used for initial configuration of the BARs */
1971     memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
1972                           xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);
1973 
1974     /* Interrupt controller MMIO regions */
1975     memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
1976                        PNV9_XIVE_IC_SIZE);
1977 
1978     memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
1979                           xive, "xive-ic-reg", 1 << xive->ic_shift);
1980     memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
1981                           &pnv_xive_ic_notify_ops,
1982                           xive, "xive-ic-notify", 1 << xive->ic_shift);
1983     xive->ic_notify_mmio.disable_reentrancy_guard = true;
1984 
1985     /* The Pervasive LSI trigger and EOI pages (not modeled) */
1986     memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
1987                           xive, "xive-ic-lsi", 2 << xive->ic_shift);
1988 
1989     /* Thread Interrupt Management Area (Indirect) */
1990     memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
1991                           &xive_tm_indirect_ops,
1992                           xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
1993     /*
1994      * Overall Virtualization Controller MMIO region containing the
1995      * IPI ESB pages and END ESB pages. The layout is defined by the
1996      * EDT "Domain table" and the accesses are dispatched using
1997      * address spaces for each.
1998      */
1999     memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
2000                           "xive-vc", PNV9_XIVE_VC_SIZE);
2001 
2002     memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
2003                        PNV9_XIVE_VC_SIZE);
2004     address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
2005     memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
2006                        PNV9_XIVE_VC_SIZE);
2007     address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");
2008 
2009     /*
2010      * The MMIO windows exposing the IPI ESBs and the END ESBs in the
2011      * VC region. Their size is configured by the FW in the EDT table.
2012      */
2013     memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
2014     memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);
2015 
2016     /* Presenter Controller MMIO region (not modeled) */
2017     memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
2018                           "xive-pc", PNV9_XIVE_PC_SIZE);
2019 
2020     /* Thread Interrupt Management Area (Direct) */
2021     memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
2022                           xive, "xive-tima", PNV9_XIVE_TM_SIZE);
2023 
2024     qemu_register_reset(pnv_xive_reset, dev);
2025 }
2026 
2027 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
2028                              int xscom_offset)
2029 {
2030     const char compat[] = "ibm,power9-xive-x";
2031     char *name;
2032     int offset;
2033     uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
2034     uint32_t reg[] = {
2035         cpu_to_be32(lpc_pcba),
2036         cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
2037     };
2038 
2039     name = g_strdup_printf("xive@%x", lpc_pcba);
2040     offset = fdt_add_subnode(fdt, xscom_offset, name);
2041     _FDT(offset);
2042     g_free(name);
2043 
2044     _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
2045     _FDT((fdt_setprop(fdt, offset, "compatible", compat,
2046                       sizeof(compat))));
2047     return 0;
2048 }
2049 
2050 static Property pnv_xive_properties[] = {
2051     DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
2052     DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
2053     DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
2054     DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
2055     /* The PnvChip id identifies the XIVE interrupt controller. */
2056     DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
2057     DEFINE_PROP_END_OF_LIST(),
2058 };
2059 
2060 static void pnv_xive_class_init(ObjectClass *klass, void *data)
2061 {
2062     DeviceClass *dc = DEVICE_CLASS(klass);
2063     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
2064     XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
2065     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
2066     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
2067     PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);
2068 
2069     xdc->dt_xscom = pnv_xive_dt_xscom;
2070 
2071     dc->desc = "PowerNV XIVE Interrupt Controller";
2072     device_class_set_parent_realize(dc, pnv_xive_realize, &pxc->parent_realize);
2073     dc->realize = pnv_xive_realize;
2074     device_class_set_props(dc, pnv_xive_properties);
2075 
2076     xrc->get_eas = pnv_xive_get_eas;
2077     xrc->get_pq = pnv_xive_get_pq;
2078     xrc->set_pq = pnv_xive_set_pq;
2079     xrc->get_end = pnv_xive_get_end;
2080     xrc->write_end = pnv_xive_write_end;
2081     xrc->get_nvt = pnv_xive_get_nvt;
2082     xrc->write_nvt = pnv_xive_write_nvt;
2083     xrc->get_block_id = pnv_xive_get_block_id;
2084     xrc->end_notify = pnv_xive_end_notify;
2085 
2086     xnc->notify = pnv_xive_notify;
2087     xpc->match_nvt  = pnv_xive_match_nvt;
2088     xpc->get_config = pnv_xive_presenter_get_config;
2089 };
2090 
2091 static const TypeInfo pnv_xive_info = {
2092     .name          = TYPE_PNV_XIVE,
2093     .parent        = TYPE_XIVE_ROUTER,
2094     .instance_init = pnv_xive_init,
2095     .instance_size = sizeof(PnvXive),
2096     .class_init    = pnv_xive_class_init,
2097     .class_size    = sizeof(PnvXiveClass),
2098     .interfaces    = (InterfaceInfo[]) {
2099         { TYPE_PNV_XSCOM_INTERFACE },
2100         { }
2101     }
2102 };
2103 
2104 static void pnv_xive_register_types(void)
2105 {
2106     type_register_static(&pnv_xive_info);
2107 }
2108 
2109 type_init(pnv_xive_register_types)
2110