xref: /openbmc/qemu/hw/intc/pnv_xive.c (revision a6caeee8)
/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "trace.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt FIFO backing store table (not modeled):
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}

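/*
 * For instance, with mask = 0x0000ff00ull (ctz64(mask) == 8):
 * GETFIELD(mask, 0x1234) == 0x12 and SETFIELD(mask, 0, 0x56) == 0x5600.
 */
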
/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (pnv_xive_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of the
 * same size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
           page_shift == 21 || page_shift == 24;
}

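/*
 * Compute the address of entry @idx in a direct VST. The VSD gives
 * the base address of the table and VSD_TSIZE its size, encoded as a
 * power-of-2 multiple of 4K.
 */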
static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                    MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64,
                   info->name, idx, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                       MEMTXATTRS_UNSPECIFIED)) {
            xive_error(xive, "VST: failed to access %s entry %x @0x%"
                       PRIx64, info->name, vsd_idx, vsd_addr);
            return 0;
        }

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

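/*
 * Dispatch on the VSD mode: VSD_MODE_FORWARD sends the lookup to the
 * owning chip, the VSD_INDIRECT bit selects the two-level table walk,
 * and anything else is a plain direct table.
 */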
static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_remote(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

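/*
 * Cache watch updates can target a single 32-bit word of an entry.
 * XIVE_VST_WORD_ALL requests a write of the full entry instead.
 */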
#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

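/*
 * The END cache watch exposes a 4x64-bit snapshot of an END entry in
 * the VC_EQC_CWATCH_DAT registers. A write to DATA0 flushes the
 * snapshot back to the entry selected by VC_EQC_CWATCH_SPEC.
 */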
static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

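/*
 * The NVT cache watch works like the END one, with a larger 8x64-bit
 * snapshot held in the PC_VPC_CWATCH_DAT registers.
 */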
static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * cores 0-15 of the chip (normal mode) or cores 0-7 (fused mode). The
 * second register covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}

static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and carry on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips, so to identify the
 * chip from which the access is being done, we extract the chip ID
 * from the PIR.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive *xive = PNV_XIVE(xptr);

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}

/*
 * XIVE helpers
 */

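/*
 * The BARM registers hold a mask; the two's complement (~m + 1)
 * converts it into the size of the MMIO window.
 */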
static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

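/*
 * The SBE table stores 2 PQ bits per interrupt source, i.e. 4 sources
 * per byte (SBE_PER_BYTE), so a direct table of tsize bytes backs
 * tsize * 4 IPIs.
 */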
static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64,
                   info->name, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry !?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
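/*
 * For instance, with set 0 = IPI, set 1 = END and set 2 = IPI, an
 * access falling in set 2 of the VC region lands at offset
 * (vc_offset - edt_size) of the IPI region, since one END set
 * precedes it.
 */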
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
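/*
 * Firmware first selects a table and an index through CQ_TAR, then
 * pushes each entry through CQ_TDR. When CQ_TAR_TBL_AUTOINC is set,
 * the index advances after every write.
 */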
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt FIFOs of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured here, as each uses
 * the Virtual Structure Tables: SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                      " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has
         * removed support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio,  0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] &= ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] &= ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    trace_pnv_xive_ic_hw_trigger(addr, val);

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                       !!(val & XIVE_TRIGGER_PQ));
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is also used for resets and for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
                                    Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
    uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);

    if (!xive_nvt_is_valid(nvt)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
                   eq_blk, eq_idx,
                   xive_get_field32(NVT_W4_IPB, nvt->w4));
}

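/*
 * Dump the controller state for the HMP 'info pic' command. The tables
 * are walked through the XiveRouter accessors, so the output reflects
 * the VSTs as configured by FW.
 */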
void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = pnv_xive_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    XiveNVT nvt;
    int i;
    uint64_t xive_nvt_per_subpage;

    monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
                   0, XIVE_NVT_COUNT - 1);
    xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
    for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
        while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
            xive_nvt_pic_print_info(&nvt, i++, mon);
        }
    }
}

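/*
 * Called at machine reset (registered with qemu_register_reset() in
 * pnv_xive_realize()). The ESB subregions are unmapped because their
 * size and position depend on the EDT and BAR settings that FW will
 * reprogram at the next boot.
 */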
static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

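/*
 * The embedded IPI and END source objects are only initialized here.
 * Their properties are set and they are realized in pnv_xive_realize()
 * below.
 */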
static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);
}

/*
 * Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
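/*
 * Each ESB entry occupies two 64K pages (1ull << XIVE_ESB_64K_2PAGE =
 * 128K) of MMIO in the VC window, so the maximum count is the VC BAR
 * size divided by this per-entry footprint.
 */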

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

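/*
 * Add the XIVE node under the xscom node of the device tree, e.g.
 * (illustrative, the address comes from PNV9_XSCOM_XIVE_BASE):
 *
 *   xive@5013000 {
 *       reg = <0x5013000 0x300>;
 *       compatible = "ibm,power9-xive-x";
 *   };
 */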
static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}

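/*
 * The BAR properties below are expected to be set by the chip model
 * (see pnv.c) from the POWER9 memory map before realize; "chip" is a
 * mandatory link, as asserted in pnv_xive_realize().
 */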
static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

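/*
 * The XiveRouter, XiveNotifier and XivePresenter handlers below are
 * the entry points through which the generic XIVE core reaches back
 * into this model: VST accessors for the EAS/END/NVT tables, trigger
 * notification and NVT matching for interrupt presentation.
 */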
static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    device_class_set_parent_realize(dc, pnv_xive_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive_properties);

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_pq = pnv_xive_get_pq;
    xrc->set_pq = pnv_xive_set_pq;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_block_id = pnv_xive_get_block_id;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
}

static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .class_size    = sizeof(PnvXiveClass),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)