/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4
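
/*
 * Each source is represented by a 2-bit PQ state in the SBE backing
 * store, i.e. four sources per byte; pnv_xive_nr_ipis() below relies
 * on this packing.
 */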

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled):
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}

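/*
 * A worked example (sketch): with mask 0x00F0, ctz64(mask) is 4, so
 * GETFIELD(0x00F0, 0x1230) returns 0x3 and
 * SETFIELD(0x00F0, 0x1200, 0x5) returns 0x1250. The mask defines both
 * the position and the width of the field, as in skiboot.
 */
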
/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_ic(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (xive->chip->chip_id == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of the
 * same size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
           page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

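/*
 * A worked example (sketch): assuming a 64K indirect page and 32-byte
 * END entries, vst_per_page is 65536 / 32 = 2048. A lookup of END
 * index 5000 thus reads the third VSD (vsd_idx = 2) of the indirect
 * table and resolves entry 5000 % 2048 = 904 within that page.
 */
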
static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_ic(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

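/*
 * The cache watch engines give FW a coherent view of the END and NVT
 * caches: a watch is armed by writing the SPEC register, a read of
 * DAT0 loads the cached entry into the DAT registers, and a write to
 * DAT0 commits the DAT registers back to the VST in RAM. The VPC
 * watch below works the same way for NVT entries.
 */
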
static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_get_ic(blk) != xive) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;
    int count = 0;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
        int ring;

        /*
         * Check the thread context CAM lines and record matches.
         */
        ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);
        /*
         * Save the context and carry on to catch duplicates, which we
         * don't support yet.
         */
        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                              "thread context NVT %x/%x\n",
                              nvt_blk, nvt_idx);
                return -1;
            }

            match->ring = ring;
            match->tctx = tctx;
            count++;
        }
    }

    return count;
}

static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    PnvXive *xive = NULL;
    CPUPPCState *env = &cpu->env;
    int pir = env->spr_cb[SPR_PIR].default_value;

    /*
     * Perform an extra check on the HW thread enablement.
     *
     * The TIMA is shared among the chips; to identify the chip from
     * which the access is being done, we extract the chip id from the
     * PIR.
     */
    xive = pnv_xive_get_ic((pir >> 8) & 0xf);
    if (!xive) {
        return NULL;
    }

    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir);
    }

    return tctx;
}
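
/*
 * On POWER9, the PIR encodes the chip and the thread: in the checks
 * above, (pir >> 8) & 0xf selects the chip and pir & 0x3f indexes the
 * thread-enable bit in PC_THREAD_EN_REG0. For instance, PIR 0x234
 * designates a thread on chip 2 whose enablement is PPC_BIT(0x34).
 */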

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = xive->chip->chip_id;

    xive_router_notify(xn, XIVE_EAS(blk, srcno));
}
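
/*
 * A sketch of the encoding: XIVE_EAS() packs the block id in the top
 * bits of the 32-bit interrupt number (the counterpart of the
 * XIVE_EAS_BLOCK()/XIVE_EAS_INDEX() decoding used in the notify page
 * handler below). Assuming a 4-bit block field at bit 28, source 0x10
 * on chip 1 is presented to the Router as EAS 0x10000010.
 */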

/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}
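
/*
 * A worked example (sketch): the BARM registers hold a mask, so the
 * region size is its two's complement. A CQ_VC_BARM value of
 * 0xffffffffe0000000 yields ~mask + 1 = 0x20000000, i.e. a 512MB VC
 * region. The PC BAR mask below follows the same scheme.
 */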

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
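
/*
 * For instance (a sketch), a direct 4K SBE table provisions
 * 4096 * SBE_PER_BYTE = 16384 IPI numbers for the block. Indirect SBE
 * tables are not accounted for here, hence the 0.
 */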

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
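
/*
 * A worked example (sketch): with EDT[0] = IPI, EDT[1] = EQ and
 * EDT[2] = IPI, and a set size S, a VC access at offset 2S + x falls
 * in the second IPI set. One EQ set precedes it, so the access is
 * remapped to offset S + x in the IPI address space.
 */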

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}
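
/*
 * Typical FW usage (a sketch): program CQ_TAR once with
 * CQ_TAR_TSEL_EDT, index 0 and CQ_TAR_TBL_AUTOINC, then write CQ_TDR
 * for each EDT entry. The auto-incremented index reaching
 * ARRAY_SIZE(xive->edt) is what triggers pnv_xive_edt_resize() above.
 */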

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and the VC sub-engines are configured, as each uses the
 * Virtual Structure Tables: SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                      " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         *
         * PC_TCTXT_CFG_BLKGRP_EN
         * PC_TCTXT_CFG_HARD_CHIPID_BLK :
         *   Moves the chipid into block field for hardwired CAM compares.
         *   Block offset value is adjusted to 0b0..01 & ThrdId
         *
         *   Will require changes in xive_presenter_tctx_match(). I am
         *   not sure how to handle that yet.
         */

        /* Overrides hardwired chip ID with the chip ID field */
        if (val & PC_TCTXT_CHIPID_OVERRIDE) {
            xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
        }
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has
         * removed support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */
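
/*
 * Each special operation above occupies one 128-byte cache line in
 * the second half of the notify page: forwards from 0x800, VC syncs
 * from 0xc00 and PC syncs from 0xe80, as decoded by the write handler
 * below.
 */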

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id
 * (PIR) has to be configured in the IC registers beforehand. This is
 * also used for resets and for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff;
    cpu = ppc_get_vcpu_by_pir(pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}
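
/*
 * The expected sequence (a sketch): FW writes PC_TCTXT_INDIR0 with
 * PC_TCTXT_INDIR_VALID and the target PIR, then accesses the thread's
 * context through IC pages 4-7 with the handlers below, as if it were
 * the thread's own TIMA.
 */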

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    int i;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] ENDT\n", blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] END Escalation EAT\n", blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, mon);
    }
}

static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /*
     * Use the PnvChip id to identify the XIVE interrupt controller.
     * It can be overridden by configuration at runtime.
     */
    xive->tctx_chipid = xive->chip->chip_id;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}
1682 
1683 /*
1684  *  Maximum number of IRQs and ENDs supported by HW
1685  */
1686 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1687 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1688 
1689 static void pnv_xive_realize(DeviceState *dev, Error **errp)
1690 {
1691     PnvXive *xive = PNV_XIVE(dev);
1692     XiveSource *xsrc = &xive->ipi_source;
1693     XiveENDSource *end_xsrc = &xive->end_source;
1694     Error *local_err = NULL;
1695 
1696     assert(xive->chip);
1697 
1698     /*
1699      * The XiveSource and XiveENDSource objects are realized with the
1700      * maximum allowed HW configuration. The ESB MMIO regions will be
1701      * resized dynamically when FW configures the controller, so that
1702      * accesses to unprovisioned resources are blocked.
1703      */
1704     object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
1705                             &error_fatal);
1706     object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive",
1707                              &error_abort);
1708     object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
1709     if (local_err) {
1710         error_propagate(errp, local_err);
1711         return;
1712     }
1713 
1714     object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
1715                             &error_fatal);
1716     object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive",
1717                              &error_abort);
1718     object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
1719     if (local_err) {
1720         error_propagate(errp, local_err);
1721         return;
1722     }
1723 
1724     /* Default page size. Generally changed at runtime to 64k */
1725     xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1726 
1727     /* XSCOM region, used for initial configuration of the BARs */
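    /* (The XSCOM space counts 8-byte doublewords, hence the << 3 below.) */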
1728     memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
1729                           xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);
1730 
1731     /* Interrupt controller MMIO regions */
1732     memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
1733                        PNV9_XIVE_IC_SIZE);
1734 
1735     memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
1736                           xive, "xive-ic-reg", 1 << xive->ic_shift);
1737     memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
1738                           &pnv_xive_ic_notify_ops,
1739                           xive, "xive-ic-notify", 1 << xive->ic_shift);
1740 
1741     /* The Pervasive LSI trigger and EOI pages (not modeled) */
1742     memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
1743                           xive, "xive-ic-lsi", 2 << xive->ic_shift);
1744 
1745     /* Thread Interrupt Management Area (Indirect) */
1746     memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
1747                           &xive_tm_indirect_ops,
1748                           xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
1749     /*
1750      * Overall Virtualization Controller MMIO region containing the
1751      * IPI ESB pages and END ESB pages. The layout is defined by the
1752      * EDT "Domain table" and the accesses are dispatched using
1753      * address spaces for each. See the sketch after this function.
1754      */
1755     memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
1756                           "xive-vc", PNV9_XIVE_VC_SIZE);
1757 
1758     memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
1759                        PNV9_XIVE_VC_SIZE);
1760     address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
1761     memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
1762                        PNV9_XIVE_VC_SIZE);
1763     address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");
1764 
1765     /*
1766      * The MMIO windows exposing the IPI ESBs and the END ESBs in the
1767      * VC region. Their size is configured by the FW in the EDT table.
1768      */
1769     memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
1770     memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);
1771 
1772     /* Presenter Controller MMIO region (not modeled) */
1773     memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
1774                           "xive-pc", PNV9_XIVE_PC_SIZE);
1775 
1776     /* Thread Interrupt Management Area (Direct) */
1777     memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
1778                           xive, "xive-tima", PNV9_XIVE_TM_SIZE);
1779 
1780     qemu_register_reset(pnv_xive_reset, dev);
1781 }
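
/*
 * A minimal sketch of how a load on the VC window is steered to one of
 * the two address spaces initialized above. This is NOT the actual
 * pnv_xive_vc_ops handler: there, the IPI vs END decision comes from
 * the EDT table lookup; here it is passed in as a parameter purely for
 * illustration.
 */
static uint64_t G_GNUC_UNUSED pnv_xive_vc_read_sketch(PnvXive *xive,
                                                      hwaddr offset,
                                                      bool is_ipi)
{
    AddressSpace *as = is_ipi ? &xive->ipi_as : &xive->end_as;
    MemTxResult result;

    /* ESB loads on these pages are 8 bytes wide */
    return address_space_ldq(as, offset, MEMTXATTRS_UNSPECIFIED, &result);
}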
1782 
1783 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
1784                              int xscom_offset)
1785 {
1786     const char compat[] = "ibm,power9-xive-x";
1787     char *name;
1788     int offset;
1789     uint32_t pcba = PNV9_XSCOM_XIVE_BASE;
1790     uint32_t reg[] = {
1791         cpu_to_be32(pcba),
1792         cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
1793     };
1794 
1795     name = g_strdup_printf("xive@%x", pcba);
1796     offset = fdt_add_subnode(fdt, xscom_offset, name);
1797     _FDT(offset);
1798     g_free(name);
1799 
1800     _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
1801     _FDT((fdt_setprop(fdt, offset, "compatible", compat,
1802                       sizeof(compat))));
1803     return 0;
1804 }
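
/*
 * The resulting device tree fragment would look roughly like this
 * (addresses taken from the PNV9_XSCOM_XIVE_* definitions; the exact
 * values are illustrative):
 *
 *   xive@5013000 {
 *           reg = <0x5013000 0x300>;
 *           compatible = "ibm,power9-xive-x";
 *   };
 */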
1805 
1806 static Property pnv_xive_properties[] = {
1807     DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
1808     DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
1809     DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
1810     DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
1811     /* The PnvChip id identifies the XIVE interrupt controller. */
1812     DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
1813     DEFINE_PROP_END_OF_LIST(),
1814 };
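
/*
 * A sketch (not the exact hw/ppc/pnv.c code) of how the chip model is
 * expected to wire a PnvXive instance before realize, using the
 * properties above:
 *
 *   object_property_set_int(OBJECT(xive), PNV9_XIVE_IC_BASE(chip),
 *                           "ic-bar", &error_fatal);
 *   object_property_set_link(OBJECT(xive), OBJECT(chip), "chip",
 *                            &error_abort);
 */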
1815 
1816 static void pnv_xive_class_init(ObjectClass *klass, void *data)
1817 {
1818     DeviceClass *dc = DEVICE_CLASS(klass);
1819     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
1820     XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
1821     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
1822     XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
1823 
1824     xdc->dt_xscom = pnv_xive_dt_xscom;
1825 
1826     dc->desc = "PowerNV XIVE Interrupt Controller";
1827     dc->realize = pnv_xive_realize;
1828     dc->props = pnv_xive_properties;
1829 
1830     xrc->get_eas = pnv_xive_get_eas;
1831     xrc->get_end = pnv_xive_get_end;
1832     xrc->write_end = pnv_xive_write_end;
1833     xrc->get_nvt = pnv_xive_get_nvt;
1834     xrc->write_nvt = pnv_xive_write_nvt;
1835     xrc->get_tctx = pnv_xive_get_tctx;
1836 
1837     xnc->notify = pnv_xive_notify;
1838     xpc->match_nvt = pnv_xive_match_nvt;
1839 }
1840 
1841 static const TypeInfo pnv_xive_info = {
1842     .name          = TYPE_PNV_XIVE,
1843     .parent        = TYPE_XIVE_ROUTER,
1844     .instance_init = pnv_xive_init,
1845     .instance_size = sizeof(PnvXive),
1846     .class_init    = pnv_xive_class_init,
1847     .interfaces    = (InterfaceInfo[]) {
1848         { TYPE_PNV_XSCOM_INTERFACE },
1849         { }
1850     }
1851 };
1852 
1853 static void pnv_xive_register_types(void)
1854 {
1855     type_register_static(&pnv_xive_info);
1856 }
1857 
1858 type_init(pnv_xive_register_types)
1859