/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "trace.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4
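/*
 * Each SBE entry is a 2-bit P/Q state, so one byte backs 4 interrupts,
 * hence the value above.
 */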

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled):
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__)

/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares.
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (pnv_xive_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */
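/*
 * For example, with 64K indirect pages (page_shift = 16) and 32-byte
 * XiveEND entries, each subpage holds 2048 ENDs: END index 5000 is
 * entry 5000 % 2048 = 904 of the direct page pointed to by VSD
 * 5000 / 2048 = 2 of the indirect table.
 */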

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
           page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64,
                   info->name, idx, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                       MEMTXATTRS_UNSPECIFIED)) {
            xive_error(xive, "VST: failed to access %s entry %x @0x%"
                       PRIx64, info->name, vsd_idx, vsd_addr);
            return 0;
        }

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_remote(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1
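
/*
 * VST entries can be written either as a whole or one 4-byte word at
 * a time, matching the word-granular updates of the cache watch
 * facility below.
 */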

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

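/*
 * Cache watch facility. Firmware updates an END (resp. an NVT) in RAM
 * by selecting its block/index in the CWATCH_SPEC register and by
 * writing its new image in the CWATCH_DAT registers: 4 doublewords
 * for the 32-byte END, 8 for the 64-byte NVT. The write to DAT0
 * triggers the RAM update, as handled in pnv_xive_ic_reg_write().
 */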
static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * cores 0-15 of the chip (normal) or 0-7 (fused). The second register
 * covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}

static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and carry on to catch duplicates,
             * which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips. To identify the chip
 * from which the access is being done, we extract the chip id from
 * the PIR.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive *xive = PNV_XIVE(xptr);

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
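/*
 * For instance, assuming the 4-bit block / 28-bit index layout of
 * XIVE_EAS(), source 0x10 on block 1 is forwarded as IRQ number
 * 0x10000010.
 */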
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}

/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
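
/*
 * A direct 64K SBE table, for example, provisions 64K * SBE_PER_BYTE
 * = 256K IPI numbers for the block.
 */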

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64,
                   info->name, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        /* no entry index is available here, unlike in the VST accessors */
        xive_error(xive, "VST: invalid %s entry !?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */
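
/*
 * Each of the XIVE_TABLE_EDT_MAX sets covers an identical fraction of
 * the VC BAR (see pnv_xive_edt_shift() above) and pnv_xive_edt_offset()
 * below compacts the sets of a given type into a contiguous offset
 * within the IPI or the END ESB address space.
 */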

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region into the IPI or END region using
 * the layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}
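
/*
 * Typically, firmware programs the EDT by writing CQ_TAR once with
 * CQ_TAR_TBL_AUTOINC set and then streaming ARRAY_SIZE(xive->edt)
 * values to CQ_TDR; the last write brings tsel_index up to the array
 * size, which triggers pnv_xive_edt_resize() above.
 */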

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured, as each uses the
 * Virtual Structure Tables: SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */
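
/*
 * This layout matches the sub-regions registered when CQ_IC_BAR is
 * programmed below: registers at page 0, notify at page 1, LSI at
 * pages 2-3 and the indirect TIMA at pages 4-7.
 */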

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has
         * removed support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] &= ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] &= ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */
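
/*
 * The second half of the notify page (offsets 0x800 and up) is thus
 * laid out as one 128-byte cache line per PowerBus operation,
 * following the HW interrupt triggers of the first half.
 */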

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    trace_pnv_xive_ic_hw_trigger(addr, val);

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                       !!(val & XIVE_TRIGGER_PQ));
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is used for resets and also for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1694 
1695 /*
1696  * Presenter Controller MMIO region. The Virtualization Controller
1697  * updates the IPB in the NVT table when required. Not modeled.
1698  */
1699 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
1700                                  unsigned size)
1701 {
1702     PnvXive *xive = PNV_XIVE(opaque);
1703 
1704     xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
1705     return -1;
1706 }
1707 
1708 static void pnv_xive_pc_write(void *opaque, hwaddr addr,
1709                               uint64_t value, unsigned size)
1710 {
1711     PnvXive *xive = PNV_XIVE(opaque);
1712 
1713     xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
1714 }
1715 
1716 static const MemoryRegionOps pnv_xive_pc_ops = {
1717     .read = pnv_xive_pc_read,
1718     .write = pnv_xive_pc_write,
1719     .endianness = DEVICE_BIG_ENDIAN,
1720     .valid = {
1721         .min_access_size = 8,
1722         .max_access_size = 8,
1723     },
1724     .impl = {
1725         .min_access_size = 8,
1726         .max_access_size = 8,
1727     },
1728 };
1729 
1730 static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
1731                                     Monitor *mon)
1732 {
1733     uint8_t  eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
1734     uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);
1735 
1736     if (!xive_nvt_is_valid(nvt)) {
1737         return;
1738     }
1739 
1740     monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
1741                    eq_blk, eq_idx,
1742                    xive_get_field32(NVT_W4_IPB, nvt->w4));
1743 }
1744 
1745 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
1746 {
1747     XiveRouter *xrtr = XIVE_ROUTER(xive);
1748     uint8_t blk = pnv_xive_block_id(xive);
1749     uint8_t chip_id = xive->chip->chip_id;
1750     uint32_t srcno0 = XIVE_EAS(blk, 0);
1751     uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
1752     XiveEAS eas;
1753     XiveEND end;
1754     XiveNVT nvt;
1755     int i;
1756     uint64_t xive_nvt_per_subpage;
1757 
1758     monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
1759                    srcno0, srcno0 + nr_ipis - 1);
1760     xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);
1761 
1762     monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
1763                    srcno0, srcno0 + nr_ipis - 1);
1764     for (i = 0; i < nr_ipis; i++) {
1765         if (xive_router_get_eas(xrtr, blk, i, &eas)) {
1766             break;
1767         }
1768         if (!xive_eas_is_masked(&eas)) {
1769             xive_eas_pic_print_info(&eas, i, mon);
1770         }
1771     }
1772 
1773     monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
1774     i = 0;
1775     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1776         xive_end_pic_print_info(&end, i++, mon);
1777     }
1778 
1779     monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
1780     i = 0;
1781     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1782         xive_end_eas_pic_print_info(&end, i++, mon);
1783     }
1784 
1785     monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
1786                    0, XIVE_NVT_COUNT - 1);
1787     xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
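    /*
     * The NVT table can be sparsely provisioned when indirect: print
     * the entries of each provisioned subpage until a lookup fails,
     * then skip to the next subpage.
     */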
1788     for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
1789         while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
1790             xive_nvt_pic_print_info(&nvt, i++, mon);
1791         }
1792     }
1793 }
1794 
1795 static void pnv_xive_reset(void *dev)
1796 {
1797     PnvXive *xive = PNV_XIVE(dev);
1798     XiveSource *xsrc = &xive->ipi_source;
1799     XiveENDSource *end_xsrc = &xive->end_source;
1800 
1801     /* Default page size (should be changed at runtime to 64k) */
1802     xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1803 
1804     /*
     * Clear the ESB and EDT subregions: the FW remaps them when it
     * reconfigures the controller.
     */
1805     if (memory_region_is_mapped(&xsrc->esb_mmio)) {
1806         memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
1807     }
1808 
1809     if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
1810         memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
1811     }
1812 
1813     if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
1814         memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
1815     }
1816 
1817     if (memory_region_is_mapped(&xive->end_edt_mmio)) {
1818         memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
1819     }
1820 }
1821 
1822 static void pnv_xive_init(Object *obj)
1823 {
1824     PnvXive *xive = PNV_XIVE(obj);
1825 
1826     object_initialize_child(obj, "ipi_source", &xive->ipi_source,
1827                             TYPE_XIVE_SOURCE);
1828     object_initialize_child(obj, "end_source", &xive->end_source,
1829                             TYPE_XIVE_END_SOURCE);
1830 }
1831 
1832 /*
1833  * Maximum number of IRQs and ENDs supported by HW. Each interrupt
 * source and each END is backed by a pair of 64K ESB pages in the VC
 * region, hence the division by 1ull << XIVE_ESB_64K_2PAGE.
 */
1835 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1836 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1837 
1838 static void pnv_xive_realize(DeviceState *dev, Error **errp)
1839 {
1840     PnvXive *xive = PNV_XIVE(dev);
1841     PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
1842     XiveSource *xsrc = &xive->ipi_source;
1843     XiveENDSource *end_xsrc = &xive->end_source;
1844     Error *local_err = NULL;
1845 
1846     pxc->parent_realize(dev, &local_err);
1847     if (local_err) {
1848         error_propagate(errp, local_err);
1849         return;
1850     }
1851 
1852     assert(xive->chip);
1853 
1854     /*
1855      * The XiveSource and XiveENDSource objects are realized with the
1856      * maximum allowed HW configuration. The ESB MMIO regions will be
1857      * resized dynamically when the controller is configured by the FW
1858      * to limit accesses to resources not provisioned.
1859      */
1860     object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
1861                             &error_fatal);
1862     object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
1863     if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
1864         return;
1865     }
1866 
1867     object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
1868                             &error_fatal);
1869     object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
1870                              &error_abort);
1871     if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
1872         return;
1873     }
1874 
1875     /* Default page size. Generally changed at runtime to 64k */
1876     xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1877 
1878     /*
     * XSCOM region, used for initial configuration of the BARs.
     * XSCOM addresses cover 8-byte words, hence the size shift.
     */
1879     memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
1880                           xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);
1881 
1882     /* Interrupt controller MMIO regions */
1883     memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
1884                        PNV9_XIVE_IC_SIZE);
1885 
1886     memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
1887                           xive, "xive-ic-reg", 1 << xive->ic_shift);
1888     memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
1889                           &pnv_xive_ic_notify_ops,
1890                           xive, "xive-ic-notify", 1 << xive->ic_shift);
1891 
1892     /* The Pervasive LSI trigger and EOI pages (not modeled) */
1893     memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
1894                           xive, "xive-ic-lsi", 2 << xive->ic_shift);
1895 
1896     /* Thread Interrupt Management Area (Indirect) */
1897     memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
1898                           &xive_tm_indirect_ops,
1899                           xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
1900     /*
1901      * Overall Virtualization Controller MMIO region containing the
1902      * IPI ESB pages and END ESB pages. The layout is defined by the
1903      * EDT "Domain table" and the accesses are dispatched using
1904      * address spaces for each.
1905      */
1906     memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
1907                           "xive-vc", PNV9_XIVE_VC_SIZE);
1908 
1909     memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
1910                        PNV9_XIVE_VC_SIZE);
1911     address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
1912     memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
1913                        PNV9_XIVE_VC_SIZE);
1914     address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");
1915 
1916     /*
1917      * The MMIO windows exposing the IPI ESBs and the END ESBs in the
1918      * VC region. Their size is configured by the FW in the EDT table.
1919      */
1920     memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
1921     memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);
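
    /*
     * Resulting containment once the FW has programmed the EDT (the
     * subregions are mapped at runtime and removed again by
     * pnv_xive_reset()):
     *
     *   xive-vc-ipi (AS) -> xive-vc-ipi-edt -> IPI source ESB pages
     *   xive-vc-end (AS) -> xive-vc-end-edt -> END source ESB pages
     */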
1922 
1923     /* Presenter Controller MMIO region (not modeled) */
1924     memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
1925                           "xive-pc", PNV9_XIVE_PC_SIZE);
1926 
1927     /* Thread Interrupt Management Area (Direct) */
1928     memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
1929                           xive, "xive-tima", PNV9_XIVE_TM_SIZE);
1930 
1931     qemu_register_reset(pnv_xive_reset, dev);
1932 }
1933 
1934 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
1935                              int xscom_offset)
1936 {
1937     const char compat[] = "ibm,power9-xive-x";
1938     char *name;
1939     int offset;
1940     uint32_t xive_pcba = PNV9_XSCOM_XIVE_BASE;
1941     uint32_t reg[] = {
1942         cpu_to_be32(xive_pcba),
1943         cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
1944     };
1945 
1946     name = g_strdup_printf("xive@%x", xive_pcba);
1947     offset = fdt_add_subnode(fdt, xscom_offset, name);
1948     _FDT(offset);
1949     g_free(name);
1950 
1951     _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
1952     _FDT((fdt_setprop(fdt, offset, "compatible", compat,
1953                       sizeof(compat))));
1954     return 0;
1955 }
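
/*
 * For illustration, the node added under the xscom node should look
 * like the following, assuming the usual POWER9 values of
 * PNV9_XSCOM_XIVE_BASE (0x5013000) and PNV9_XSCOM_XIVE_SIZE (0x300):
 *
 *   xive@5013000 {
 *           compatible = "ibm,power9-xive-x";
 *           reg = <0x5013000 0x300>;
 *   };
 */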
1956 
1957 static Property pnv_xive_properties[] = {
1958     DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
1959     DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
1960     DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
1961     DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
1962     /* The PnvChip id identifies the XIVE interrupt controller. */
1963     DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
1964     DEFINE_PROP_END_OF_LIST(),
1965 };
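
/*
 * A sketch of how the PowerNV chip model is expected to wire these
 * properties (the PNV9_XIVE_*_BASE macros are assumptions of the
 * machine code, not defined in this file):
 *
 *   object_property_set_link(OBJECT(xive), "chip", OBJECT(chip),
 *                            &error_abort);
 *   object_property_set_int(OBJECT(xive), "ic-bar",
 *                           PNV9_XIVE_IC_BASE(chip), &error_fatal);
 *   ...
 */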
1966 
1967 static void pnv_xive_class_init(ObjectClass *klass, void *data)
1968 {
1969     DeviceClass *dc = DEVICE_CLASS(klass);
1970     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
1971     XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
1972     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
1973     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
1974     PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);
1975 
1976     xdc->dt_xscom = pnv_xive_dt_xscom;
1977 
1978     dc->desc = "PowerNV XIVE Interrupt Controller";
1979     device_class_set_parent_realize(dc, pnv_xive_realize, &pxc->parent_realize);
1981     device_class_set_props(dc, pnv_xive_properties);
1982 
1983     xrc->get_eas = pnv_xive_get_eas;
1984     xrc->get_pq = pnv_xive_get_pq;
1985     xrc->set_pq = pnv_xive_set_pq;
1986     xrc->get_end = pnv_xive_get_end;
1987     xrc->write_end = pnv_xive_write_end;
1988     xrc->get_nvt = pnv_xive_get_nvt;
1989     xrc->write_nvt = pnv_xive_write_nvt;
1990     xrc->get_block_id = pnv_xive_get_block_id;
1991 
1992     xnc->notify = pnv_xive_notify;
1993     xpc->match_nvt = pnv_xive_match_nvt;
1994 }
1995 
1996 static const TypeInfo pnv_xive_info = {
1997     .name          = TYPE_PNV_XIVE,
1998     .parent        = TYPE_XIVE_ROUTER,
1999     .instance_init = pnv_xive_init,
2000     .instance_size = sizeof(PnvXive),
2001     .class_init    = pnv_xive_class_init,
2002     .class_size    = sizeof(PnvXiveClass),
2003     .interfaces    = (InterfaceInfo[]) {
2004         { TYPE_PNV_XSCOM_INTERFACE },
2005         { }
2006     }
2007 };
2008 
2009 static void pnv_xive_register_types(void)
2010 {
2011     type_register_static(&pnv_xive_info);
2012 }
2013 
2014 type_init(pnv_xive_register_types)
2015