xref: /openbmc/qemu/hw/intc/pnv_xive.c (revision 6a0acfff)
/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/ppc.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#define XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
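
/*
 * Each source PQ state occupies 2 bits in the SBE backing store,
 * hence the 4 sources per byte below.
 */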
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt FIFO backing store table (not modeled):
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__)

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
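
/*
 * Worked example: with mask = 0xF8 (bits 3..7, ctz64(mask) == 3),
 *
 *   GETFIELD(0xF8, 0x50)      == 0xA   ((0x50 & 0xF8) >> 3)
 *   SETFIELD(0xF8, 0x07, 0xA) == 0x57  (0x07 | ((0xA << 3) & 0xF8))
 *
 * i.e. fields are addressed by their mask, skiboot-style, rather
 * than by (pos, len) as with extract64()/deposit64().
 */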

/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_ic(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (xive->chip->chip_id == blk) {
            return xive;
        }
    }
    return NULL;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8
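
/*
 * Layout sketch for an indirect table: the first level is an array
 * of VSDs, one per page. Each second-level VSD points to one page of
 * entries and encodes its page size in the same VSD_TSIZE format as
 * the first-level descriptor. All pages of a table must share the
 * same page size, which pnv_xive_vst_addr_indirect() checks below.
 */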

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
           page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_size(uint64_t vsd)
{
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    /*
     * Read the first descriptor to get the page size of the indirect
     * table.
     */
    if (VSD_INDIRECT & vsd) {
        uint32_t nr_pages = vst_tsize / XIVE_VSD_SIZE;
        uint32_t page_shift;

        vsd = ldq_be_dma(&address_space_memory, vsd & VSD_ADDRESS_MASK);
        page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            return 0;
        }

        return nr_pages * (1ull << page_shift);
    }

    return vst_tsize;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    return vst_addr + idx * info->size;
}

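/*
 * Worked example (illustrative numbers): with 64K indirect pages
 * (page_shift == 16) and 32-byte ENDs, vst_per_page == 2048. END
 * index 5000 is then reached through second-level VSD 2 (5000 /
 * 2048), at offset 904 * 32 (5000 % 2048) in the page it points to.
 */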
static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;
    uint32_t idx_max;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_ic(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    idx_max = pnv_xive_vst_size(vsd) / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x/%x out of range [ 0 .. %x ] !?",
                   info->name, blk, idx, idx_max);
#endif
        return 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}
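
/*
 * Sketch of the cache watch protocol driven by firmware (inferred
 * from the register handlers below): the SPEC register selects an
 * entry with a block/index pair, a read of DAT0 loads the DATA
 * registers from the entry in RAM, and a write to DAT0 pushes the
 * DATA registers back, updating the whole entry at once.
 */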

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_get_ic(blk) != xive) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_SRCNO(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    PnvXive *xive = NULL;
    CPUPPCState *env = &cpu->env;
    int pir = env->spr_cb[SPR_PIR].default_value;

    /*
     * Perform an extra check on the HW thread enablement.
     *
     * The TIMA is shared among the chips and to identify the chip
     * from which the access is being done, we extract the chip id
     * from the PIR.
     */
    xive = pnv_xive_get_ic((pir >> 8) & 0xf);
    if (!xive) {
        return NULL;
    }

    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir);
    }

    return tctx;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = xive->chip->chip_id;

    xive_router_notify(xn, XIVE_SRCNO(blk, srcno));
}
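
/*
 * As an illustration, assuming XIVE_SRCNO() packs the block id in
 * the top nibble of the 32-bit source number (see xive_regs.h),
 * local IPI 0x20 on chip/block 1 reaches the Router as source
 * number 0x10000020.
 */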

/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}
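
/*
 * The BARM registers hold a mask whose two's-complement negation
 * (~val + 1) is the region size. As an illustrative value, a
 * CQ_VC_BARM of 0xffffffc000000000 yields a 256GB (1 << 38) VC
 * region, which pnv_xive_edt_shift() splits into XIVE_TABLE_EDT_MAX
 * equal sets.
 */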

static uint32_t pnv_xive_nr_ipis(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;

    return pnv_xive_vst_size(xive->vsds[VST_TSEL_SBE][blk]) * SBE_PER_BYTE;
}

static uint32_t pnv_xive_nr_ends(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;

    return pnv_xive_vst_size(xive->vsds[VST_TSEL_EQDT][blk])
        / vst_infos[VST_TSEL_EQDT].size;
}
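
/*
 * As an example of the scaling involved: a 64K SBE table covers
 * 64K * SBE_PER_BYTE = 256K IPIs, while a 64K ENDT holds
 * 64K / sizeof(XiveEND) ENDs (2K with 32-byte ENDs).
 */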

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region into the IPI or END region using
 * the layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
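
/*
 * Worked example (hypothetical layout): with a 64MB VC region and 16
 * EDT sets, each set spans 4MB. If sets 0 and 1 are IPI and set 2 is
 * EQ, a VC access at 2 * 4MB + 0x10 falls in set 2 and remaps to
 * offset 0x10 of the END address space, the two preceding IPI sets
 * having been subtracted.
 */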

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END. Compute the number of ENDs
         * provisioned by FW and resize the END ESB window accordingly.
         */
        memory_region_set_size(&end_xsrc->esb_mmio, pnv_xive_nr_ends(xive) *
                               (1ull << (end_xsrc->esb_shift + 1)));
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own. Compute the number of IRQs provisioned
         * by FW and resize the IPI ESB window accordingly.
         */
        memory_region_set_size(&xsrc->esb_mmio, pnv_xive_nr_ipis(xive) *
                               (1ull << xsrc->esb_shift));
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt FIFOs of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured, as each uses the
 * Virtual Structure Tables: SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                      " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         *
         * PC_TCTXT_CFG_BLKGRP_EN
         * PC_TCTXT_CFG_HARD_CHIPID_BLK :
         *   Moves the chipid into block field for hardwired CAM compares.
         *   Block offset value is adjusted to 0b0..01 & ThrdId
         *
         *   Will require changes in xive_presenter_tctx_match(). I am
         *   not sure how to handle that yet.
         */

        /* Overrides hardwired chip ID with the chip ID field */
        if (val & PC_TCTXT_CHIPID_OVERRIDE) {
            xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
        }
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has
         * removed support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio,  0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt FIFO overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        /* HW resets the FULL and CONFLICT bits on read */
        xive->regs[reg] &= ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        /* HW resets the FULL and CONFLICT bits on read */
        xive->regs[reg] &= ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    xive_router_notify(XIVE_NOTIFIER(xive), val);
}
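
/*
 * For example (hypothetical values), a PHB raising its interrupt
 * 0x10 on chip/block 0 stores XIVE_SRCNO(0, 0x10) to the first cache
 * lines of this page, which lands in the 0x000 - 0x7FF trigger range
 * dispatched by pnv_xive_ic_notify_write() below.
 */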

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id
 * (PIR) has to be configured in the IC registers beforehand. This is
 * also used for resets and for debugging.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff;
    cpu = ppc_get_vcpu_by_pir(pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}
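
/*
 * Sketch of the expected sequence, as implied by the checks above:
 * firmware first writes PC_TCTXT_INDIR0 with PC_TCTXT_INDIR_VALID
 * and the PIR of the target HW thread, then issues its loads and
 * stores on the indirect TIMA pages, which the handlers below
 * redirect to that thread's interrupt context.
 */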

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

1543                               uint64_t value, unsigned size)
1544 {
1545     PnvXive *xive = PNV_XIVE(opaque);
1546 
1547     xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
1548 }
1549 

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_SRCNO(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive);
    uint32_t nr_ends = pnv_xive_nr_ends(xive);
    XiveEAS eas;
    XiveEND end;
    int i;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] ENDT %08x .. %08x\n", blk, 0, nr_ends - 1);
    for (i = 0; i < nr_ends; i++) {
        if (xive_router_get_end(xrtr, blk, i, &end)) {
            break;
        }
        xive_end_pic_print_info(&end, i, mon);
    }
}

static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /*
     * Use the PnvChip id to identify the XIVE interrupt controller.
     * It can be overridden by configuration at runtime.
     */
    xive->tctx_chipid = xive->chip->chip_id;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}

/*
 * Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
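
/*
 * Both counts divide the VC region size by the MMIO footprint of one
 * ESB entry, 1ull << XIVE_ESB_64K_2PAGE, i.e. two 64K pages (trigger
 * page plus management page) per source or END.
 */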

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    Object *obj;

    obj = object_property_get_link(OBJECT(dev), "chip", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'chip' not found: ");
        return;
    }

    /* The PnvChip id identifies the XIVE interrupt controller. */
    xive->chip = PNV_CHIP(obj);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}

static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    dc->realize = pnv_xive_realize;
    dc->props = pnv_xive_properties;

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_tctx = pnv_xive_get_tctx;

    xnc->notify = pnv_xive_notify;
}

static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)