xref: /openbmc/qemu/hw/intc/pnv_xive.c (revision cd55b1272e3fa341b3f7c32b6186a2d0f71a45c8)
1 /*
2  * QEMU PowerPC XIVE interrupt controller model
3  *
4  * Copyright (c) 2017-2019, IBM Corporation.
5  *
6  * This code is licensed under the GPL version 2 or later. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qemu/module.h"
13 #include "qapi/error.h"
14 #include "target/ppc/cpu.h"
15 #include "sysemu/cpus.h"
16 #include "sysemu/dma.h"
17 #include "sysemu/reset.h"
18 #include "monitor/monitor.h"
19 #include "hw/ppc/fdt.h"
20 #include "hw/ppc/pnv.h"
21 #include "hw/ppc/pnv_core.h"
22 #include "hw/ppc/pnv_xscom.h"
23 #include "hw/ppc/pnv_xive.h"
24 #include "hw/ppc/xive_regs.h"
25 #include "hw/qdev-properties.h"
26 #include "hw/ppc/ppc.h"
27 
28 #include <libfdt.h>
29 
30 #include "pnv_xive_regs.h"
31 
32 #undef XIVE_DEBUG
33 
34 /*
35  * Virtual structures table (VST)
36  */
37 #define SBE_PER_BYTE   4
38 
/*
 * Description of one Virtual Structure Table: entry size is used to
 * index the table, max_blocks bounds the block id programmed by FW.
 */
typedef struct XiveVstInfo {
    const char *name;       /* table name used in error reports */
    uint32_t    size;       /* size of one table entry in bytes */
    uint32_t    max_blocks; /* maximum number of blocks (block == chip) */
} XiveVstInfo;
44 
/* One entry per VST selector, indexed by the VST_TSEL_* values */
static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 }, /* PQ bits, 4 per byte */
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     *  Interrupt fifo backing store table (not modeled) :
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};
63 
/*
 * Report a guest error, prefixed with the chip id of the controller.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement: the original definition ended with a ';', so a call such
 * as "xive_error(...);" expanded to two statements, which breaks
 * unbraced if/else callers and trips -Wextra empty-statement warnings.
 */
#define xive_error(xive, fmt, ...)                                      \
    do {                                                                \
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",          \
                      (xive)->chip->chip_id, ## __VA_ARGS__);           \
    } while (0)
67 
68 /*
69  * QEMU version of the GETFIELD/SETFIELD macros
70  *
71  * TODO: It might be better to use the existing extract64() and
72  * deposit64() but this means that all the register definitions will
73  * change and become incompatible with the ones found in skiboot.
74  *
75  * Keep it as it is for now until we find a common ground.
76  */
77 static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
78 {
79     return (word & mask) >> ctz64(mask);
80 }
81 
82 static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
83                                 uint64_t value)
84 {
85     return (word & ~mask) | ((value << ctz64(mask)) & mask);
86 }
87 
88 /*
89  * Remote access to controllers. HW uses MMIOs. For now, a simple scan
90  * of the chips is good enough.
91  *
92  * TODO: Block scope support
93  */
94 static PnvXive *pnv_xive_get_ic(uint8_t blk)
95 {
96     PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
97     int i;
98 
99     for (i = 0; i < pnv->num_chips; i++) {
100         Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
101         PnvXive *xive = &chip9->xive;
102 
103         if (xive->chip->chip_id == blk) {
104             return xive;
105         }
106     }
107     return NULL;
108 }
109 
110 /*
111  * VST accessors for SBE, EAT, ENDT, NVT
112  *
113  * Indirect VST tables are arrays of VSDs pointing to a page (of same
114  * size). Each page is a direct VST table.
115  */
116 
117 #define XIVE_VSD_SIZE 8
118 
119 /* Indirect page size can be 4K, 64K, 2M, 16M. */
120 static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
121 {
122      return page_shift == 12 || page_shift == 16 ||
123          page_shift == 21 || page_shift == 24;
124 }
125 
126 static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
127                                          uint64_t vsd, uint32_t idx)
128 {
129     const XiveVstInfo *info = &vst_infos[type];
130     uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
131     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
132     uint32_t idx_max;
133 
134     idx_max = vst_tsize / info->size - 1;
135     if (idx > idx_max) {
136 #ifdef XIVE_DEBUG
137         xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
138                    info->name, idx, idx_max);
139 #endif
140         return 0;
141     }
142 
143     return vst_addr + idx * info->size;
144 }
145 
/*
 * Compute the address of entry @idx through an indirect VST: the VSD
 * points to an array of VSDs, each describing one page of the direct
 * table. Returns 0 on any error.
 */
static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table from its first VSD. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    /* Number of direct-table entries held by one page */
    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSDE_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differ !?",
                       info->name, idx);
            return 0;
        }
    }

    /* Resolve the remaining offset within the direct page */
    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
202 
203 static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
204                                   uint32_t idx)
205 {
206     const XiveVstInfo *info = &vst_infos[type];
207     uint64_t vsd;
208 
209     if (blk >= info->max_blocks) {
210         xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
211                    blk, info->name, idx);
212         return 0;
213     }
214 
215     vsd = xive->vsds[type][blk];
216 
217     /* Remote VST access */
218     if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
219         xive = pnv_xive_get_ic(blk);
220 
221         return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
222     }
223 
224     if (VSD_INDIRECT & vsd) {
225         return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
226     }
227 
228     return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
229 }
230 
231 static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
232                              uint32_t idx, void *data)
233 {
234     const XiveVstInfo *info = &vst_infos[type];
235     uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
236 
237     if (!addr) {
238         return -1;
239     }
240 
241     cpu_physical_memory_read(addr, data, info->size);
242     return 0;
243 }
244 
245 #define XIVE_VST_WORD_ALL -1
246 
247 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
248                               uint32_t idx, void *data, uint32_t word_number)
249 {
250     const XiveVstInfo *info = &vst_infos[type];
251     uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
252 
253     if (!addr) {
254         return -1;
255     }
256 
257     if (word_number == XIVE_VST_WORD_ALL) {
258         cpu_physical_memory_write(addr, data, info->size);
259     } else {
260         cpu_physical_memory_write(addr + word_number * 4,
261                                   data + word_number * 4, 4);
262     }
263     return 0;
264 }
265 
266 static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
267                             XiveEND *end)
268 {
269     return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
270 }
271 
272 static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
273                               XiveEND *end, uint8_t word_number)
274 {
275     return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
276                               word_number);
277 }
278 
/*
 * Flush the END cache watch data (VC_EQC_CWATCH_DAT0..3) to the END
 * table in RAM. The target entry is selected by VC_EQC_CWATCH_SPEC.
 */
static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    /* An END entry is four 64-bit words, stored big-endian in RAM */
    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}
295 
/*
 * Load the END entry selected by VC_EQC_CWATCH_SPEC from RAM into the
 * cache watch data registers. On error the registers are left zeroed.
 */
static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    /* Convert from big-endian RAM layout to host register values */
    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}
313 
314 static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
315                             XiveNVT *nvt)
316 {
317     return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
318 }
319 
320 static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
321                               XiveNVT *nvt, uint8_t word_number)
322 {
323     return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
324                               word_number);
325 }
326 
/*
 * Flush the NVT cache watch data (PC_VPC_CWATCH_DAT0..7) to the VPDT
 * in RAM. The target entry is selected by PC_VPC_CWATCH_SPEC.
 */
static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    /* An NVT entry is eight 64-bit words, stored big-endian in RAM */
    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}
343 
/*
 * Load the NVT entry selected by PC_VPC_CWATCH_SPEC from RAM into the
 * cache watch data registers. On error the registers are left zeroed.
 */
static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    /* Convert from big-endian RAM layout to host register values */
    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}
361 
362 static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
363                             XiveEAS *eas)
364 {
365     PnvXive *xive = PNV_XIVE(xrtr);
366 
367     if (pnv_xive_get_ic(blk) != xive) {
368         xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
369         return -1;
370     }
371 
372     return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
373 }
374 
/*
 * XiveRouter handler: return the thread interrupt context of @cs,
 * after checking that its HW thread is enabled on the owning chip.
 */
static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    PnvXive *xive = NULL;
    CPUPPCState *env = &cpu->env;
    int pir = env->spr_cb[SPR_PIR].default_value;

    /*
     * Perform an extra check on the HW thread enablement.
     *
     * The TIMA is shared among the chips and to identify the chip
     * from which the access is being done, we extract the chip id
     * from the PIR.
     */
    xive = pnv_xive_get_ic((pir >> 8) & 0xf); /* chip id in PIR bits 8-11 */
    if (!xive) {
        return NULL;
    }

    /*
     * NOTE(review): only PC_THREAD_EN_REG0 (threads 0-63) is checked,
     * and a disabled thread is only logged — the TCTX is still
     * returned. Confirm both are intended.
     */
    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir);
    }

    return tctx;
}
401 
402 /*
403  * The internal sources (IPIs) of the interrupt controller have no
404  * knowledge of the XIVE chip on which they reside. Encode the block
405  * id in the source interrupt number before forwarding the source
406  * event notification to the Router. This is required on a multichip
407  * system.
408  */
409 static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
410 {
411     PnvXive *xive = PNV_XIVE(xn);
412     uint8_t blk = xive->chip->chip_id;
413 
414     xive_router_notify(xn, XIVE_EAS(blk, srcno));
415 }
416 
417 /*
418  * XIVE helpers
419  */
420 
421 static uint64_t pnv_xive_vc_size(PnvXive *xive)
422 {
423     return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
424 }
425 
426 static uint64_t pnv_xive_edt_shift(PnvXive *xive)
427 {
428     return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
429 }
430 
431 static uint64_t pnv_xive_pc_size(PnvXive *xive)
432 {
433     return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
434 }
435 
436 static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
437 {
438     uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
439     uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
440 
441     return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
442 }
443 
444 /*
445  * EDT Table
446  *
447  * The Virtualization Controller MMIO region containing the IPI ESB
448  * pages and END ESB pages is sub-divided into "sets" which map
449  * portions of the VC region to the different ESB pages. It is
450  * configured at runtime through the EDT "Domain Table" to let the
451  * firmware decide how to split the VC address space between IPI ESB
452  * pages and END ESB pages.
453  */
454 
455 /*
456  * Computes the overall size of the IPI or the END ESB pages
457  */
458 static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
459 {
460     uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
461     uint64_t size = 0;
462     int i;
463 
464     for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
465         uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
466 
467         if (edt_type == type) {
468             size += edt_size;
469         }
470     }
471 
472     return size;
473 }
474 
/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
479 static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
480                                               uint64_t type)
481 {
482     int i;
483     uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
484     uint64_t edt_offset = vc_offset;
485 
486     for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
487         uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
488 
489         if (edt_type != type) {
490             edt_offset -= edt_size;
491         }
492     }
493 
494     return edt_offset;
495 }
496 
497 static void pnv_xive_edt_resize(PnvXive *xive)
498 {
499     uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
500     uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);
501 
502     memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
503     memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);
504 
505     memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
506     memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
507 }
508 
509 /*
510  * XIVE Table configuration. Only EDT is supported.
511  */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    /* Table and index were selected by a previous write to CQ_TAR */
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    /* Optionally advance the index for the next CQ_TDR write */
    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}
563 
564 /*
565  * Virtual Structure Tables (VST) configuration
566  */
/*
 * Record an exclusive (locally owned) VSD for table @type/@blk and
 * tune the QEMU models accordingly: direct ENDT and SBE tables also
 * resize the matching ESB MMIO windows.
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}
653 
654 /*
655  * Both PC and VC sub-engines are configured as each use the Virtual
656  * Structure Tables : SBE, EAS, END and NVT.
657  */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    /*
     * NOTE(review): the table selector is always read from the VC
     * sub-engine register, even when this is a PC_VSD_TABLE_DATA
     * write (pc_engine == true). The PC path returns early below, so
     * only the error checks are affected — confirm against skiboot's
     * programming sequence.
     */
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                      " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        /* Table owned by another chip: just record the forwarding VSD */
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
705 
706 /*
707  * Interrupt controller MMIO region. The layout is compatible between
708  * 4K and 64K pages :
709  *
710  * Page 0           sub-engine BARs
711  *  0x000 - 0x3FF   IC registers
712  *  0x400 - 0x7FF   PC registers
713  *  0x800 - 0xFFF   VC registers
714  *
715  * Page 1           Notify page (writes only)
716  *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
717  *  0x800 - 0xFFF   forwards and syncs
718  *
719  * Page 2           LSI Trigger page (writes only) (not modeled)
720  * Page 3           LSI SB EOI page (reads only) (not modeled)
721  *
722  * Page 4-7         indirect TIMA
723  */
724 
725 /*
726  * IC - registers MMIO
727  */
728 static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
729                                   uint64_t val, unsigned size)
730 {
731     PnvXive *xive = PNV_XIVE(opaque);
732     MemoryRegion *sysmem = get_system_memory();
733     uint32_t reg = offset >> 3;
734     bool is_chip0 = xive->chip->chip_id == 0;
735 
736     switch (offset) {
737 
738     /*
739      * XIVE CQ (PowerBus bridge) settings
740      */
741     case CQ_MSGSND:     /* msgsnd for doorbells */
742     case CQ_FIRMASK_OR: /* FIR error reporting */
743         break;
744     case CQ_PBI_CTL:
745         if (val & CQ_PBI_PC_64K) {
746             xive->pc_shift = 16;
747         }
748         if (val & CQ_PBI_VC_64K) {
749             xive->vc_shift = 16;
750         }
751         break;
752     case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
753         /*
754          * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
755          */
756         break;
757 
758     /*
759      * XIVE Virtualization Controller settings
760      */
761     case VC_GLOBAL_CONFIG:
762         break;
763 
764     /*
765      * XIVE Presenter Controller settings
766      */
767     case PC_GLOBAL_CONFIG:
768         /*
769          * PC_GCONF_CHIPID_OVR
770          *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
771          */
772         break;
773     case PC_TCTXT_CFG:
774         /*
775          * TODO: block group support
776          *
777          * PC_TCTXT_CFG_BLKGRP_EN
778          * PC_TCTXT_CFG_HARD_CHIPID_BLK :
779          *   Moves the chipid into block field for hardwired CAM compares.
780          *   Block offset value is adjusted to 0b0..01 & ThrdId
781          *
782          *   Will require changes in xive_presenter_tctx_match(). I am
783          *   not sure how to handle that yet.
784          */
785 
786         /* Overrides hardwired chip ID with the chip ID field */
787         if (val & PC_TCTXT_CHIPID_OVERRIDE) {
788             xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
789         }
790         break;
791     case PC_TCTXT_TRACK:
792         /*
793          * PC_TCTXT_TRACK_EN:
794          *   enable block tracking and exchange of block ownership
795          *   information between Interrupt controllers
796          */
797         break;
798 
799     /*
800      * Misc settings
801      */
802     case VC_SBC_CONFIG: /* Store EOI configuration */
803         /*
804          * Configure store EOI if required by firwmare (skiboot has removed
805          * support recently though)
806          */
807         if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
808             xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
809         }
810         break;
811 
812     case VC_EQC_CONFIG: /* TODO: silent escalation */
813     case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
814         break;
815 
816     /*
817      * XIVE BAR settings (XSCOM only)
818      */
819     case CQ_RST_CTL:
820         /* bit4: resets all BAR registers */
821         break;
822 
823     case CQ_IC_BAR: /* IC BAR. 8 pages */
824         xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
825         if (!(val & CQ_IC_BAR_VALID)) {
826             xive->ic_base = 0;
827             if (xive->regs[reg] & CQ_IC_BAR_VALID) {
828                 memory_region_del_subregion(&xive->ic_mmio,
829                                             &xive->ic_reg_mmio);
830                 memory_region_del_subregion(&xive->ic_mmio,
831                                             &xive->ic_notify_mmio);
832                 memory_region_del_subregion(&xive->ic_mmio,
833                                             &xive->ic_lsi_mmio);
834                 memory_region_del_subregion(&xive->ic_mmio,
835                                             &xive->tm_indirect_mmio);
836 
837                 memory_region_del_subregion(sysmem, &xive->ic_mmio);
838             }
839         } else {
840             xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
841             if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
842                 memory_region_add_subregion(sysmem, xive->ic_base,
843                                             &xive->ic_mmio);
844 
845                 memory_region_add_subregion(&xive->ic_mmio,  0,
846                                             &xive->ic_reg_mmio);
847                 memory_region_add_subregion(&xive->ic_mmio,
848                                             1ul << xive->ic_shift,
849                                             &xive->ic_notify_mmio);
850                 memory_region_add_subregion(&xive->ic_mmio,
851                                             2ul << xive->ic_shift,
852                                             &xive->ic_lsi_mmio);
853                 memory_region_add_subregion(&xive->ic_mmio,
854                                             4ull << xive->ic_shift,
855                                             &xive->tm_indirect_mmio);
856             }
857         }
858         break;
859 
860     case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
861     case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
862         xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
863         if (!(val & CQ_TM_BAR_VALID)) {
864             xive->tm_base = 0;
865             if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
866                 memory_region_del_subregion(sysmem, &xive->tm_mmio);
867             }
868         } else {
869             xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
870             if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
871                 memory_region_add_subregion(sysmem, xive->tm_base,
872                                             &xive->tm_mmio);
873             }
874         }
875         break;
876 
877     case CQ_PC_BARM:
878         xive->regs[reg] = val;
879         memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
880         break;
881     case CQ_PC_BAR: /* From 32M to 512G */
882         if (!(val & CQ_PC_BAR_VALID)) {
883             xive->pc_base = 0;
884             if (xive->regs[reg] & CQ_PC_BAR_VALID) {
885                 memory_region_del_subregion(sysmem, &xive->pc_mmio);
886             }
887         } else {
888             xive->pc_base = val & ~(CQ_PC_BAR_VALID);
889             if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
890                 memory_region_add_subregion(sysmem, xive->pc_base,
891                                             &xive->pc_mmio);
892             }
893         }
894         break;
895 
896     case CQ_VC_BARM:
897         xive->regs[reg] = val;
898         memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
899         break;
900     case CQ_VC_BAR: /* From 64M to 4TB */
901         if (!(val & CQ_VC_BAR_VALID)) {
902             xive->vc_base = 0;
903             if (xive->regs[reg] & CQ_VC_BAR_VALID) {
904                 memory_region_del_subregion(sysmem, &xive->vc_mmio);
905             }
906         } else {
907             xive->vc_base = val & ~(CQ_VC_BAR_VALID);
908             if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
909                 memory_region_add_subregion(sysmem, xive->vc_base,
910                                             &xive->vc_mmio);
911             }
912         }
913         break;
914 
915     /*
916      * XIVE Table settings.
917      */
918     case CQ_TAR: /* Table Address */
919         break;
920     case CQ_TDR: /* Table Data */
921         pnv_xive_table_set_data(xive, val);
922         break;
923 
924     /*
925      * XIVE VC & PC Virtual Structure Table settings
926      */
927     case VC_VSD_TABLE_ADDR:
928     case PC_VSD_TABLE_ADDR: /* Virtual table selector */
929         break;
930     case VC_VSD_TABLE_DATA: /* Virtual table setting */
931     case PC_VSD_TABLE_DATA:
932         pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
933         break;
934 
935     /*
936      * Interrupt fifo overflow in memory backing store (Not modeled)
937      */
938     case VC_IRQ_CONFIG_IPI:
939     case VC_IRQ_CONFIG_HW:
940     case VC_IRQ_CONFIG_CASCADE1:
941     case VC_IRQ_CONFIG_CASCADE2:
942     case VC_IRQ_CONFIG_REDIST:
943     case VC_IRQ_CONFIG_IPI_CASC:
944         break;
945 
946     /*
947      * XIVE hardware thread enablement
948      */
949     case PC_THREAD_EN_REG0: /* Physical Thread Enable */
950     case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
951         break;
952 
953     case PC_THREAD_EN_REG0_SET:
954         xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
955         break;
956     case PC_THREAD_EN_REG1_SET:
957         xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
958         break;
959     case PC_THREAD_EN_REG0_CLR:
960         xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
961         break;
962     case PC_THREAD_EN_REG1_CLR:
963         xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
964         break;
965 
966     /*
967      * Indirect TIMA access set up. Defines the PIR of the HW thread
968      * to use.
969      */
970     case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
971         break;
972 
973     /*
974      * XIVE PC & VC cache updates for EAS, NVT and END
975      */
976     case VC_IVC_SCRUB_MASK:
977     case VC_IVC_SCRUB_TRIG:
978         break;
979 
980     case VC_EQC_CWATCH_SPEC:
981         val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
982         break;
983     case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
984         break;
985     case VC_EQC_CWATCH_DAT0:
986         /* writing to DATA0 triggers the cache write */
987         xive->regs[reg] = val;
988         pnv_xive_end_update(xive);
989         break;
990     case VC_EQC_SCRUB_MASK:
991     case VC_EQC_SCRUB_TRIG:
992         /*
993          * The scrubbing registers flush the cache in RAM and can also
994          * invalidate.
995          */
996         break;
997 
998     case PC_VPC_CWATCH_SPEC:
999         val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
1000         break;
1001     case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
1002         break;
1003     case PC_VPC_CWATCH_DAT0:
1004         /* writing to DATA0 triggers the cache write */
1005         xive->regs[reg] = val;
1006         pnv_xive_nvt_update(xive);
1007         break;
1008     case PC_VPC_SCRUB_MASK:
1009     case PC_VPC_SCRUB_TRIG:
1010         /*
1011          * The scrubbing registers flush the cache in RAM and can also
1012          * invalidate.
1013          */
1014         break;
1015 
1016 
1017     /*
1018      * XIVE PC & VC cache invalidation
1019      */
1020     case PC_AT_KILL:
1021         break;
1022     case VC_AT_MACRO_KILL:
1023         break;
1024     case PC_AT_KILL_MASK:
1025     case VC_AT_MACRO_KILL_MASK:
1026         break;
1027 
1028     default:
1029         xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
1030         return;
1031     }
1032 
1033     xive->regs[reg] = val;
1034 }
1035 
/*
 * Load handler of the IC BAR register page. 'reg' is the index in the
 * 64-bit 'regs' array backing store. Some loads have side effects
 * (cache watch and scrub status bits are cleared on read).
 */
static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /*
     * Simple registers: return the last value stored by the write
     * handler.
     */
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     *
     * The SET/CLR aliases read back the underlying enable register.
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        /*
         * NOTE(review): this overwrites the whole register with the
         * complement mask instead of only clearing the FULL and
         * CONFLICT bits ('&= ~'). Presumably intended as a
         * clear-on-read of those two bits -- confirm against the P9
         * XIVE specification.
         */
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        /* NOTE(review): same '=' vs '&= ~' pattern as VC_EQC_CWATCH_SPEC */
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        /*
         * Scrub is reported complete by clearing the VALID bit.
         * VC_SCRUB_VALID is used for the PC register as well -- assumes
         * both layouts put the bit at the same position; confirm.
         */
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}
1171 
/*
 * IC BAR register page: 8-byte big-endian accesses only, both for the
 * guest-visible contract (.valid) and the implementation (.impl).
 */
static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1185 
1186 /*
1187  * IC - Notify MMIO port page (write only)
1188  */
1189 #define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
1190 #define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
1191 #define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
1192 #define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
1193 #define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
1194 #define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
1195 #define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
1196 #define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */
1197 
1198 /* VC synchronisation */
1199 #define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
1200 #define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
1201 #define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
1202 #define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
1203 #define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */
1204 
1205 /* PC synchronisation */
1206 #define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
1207 #define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
1208 #define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */
1209 
1210 static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
1211 {
1212     uint8_t blk;
1213     uint32_t idx;
1214 
1215     if (val & XIVE_TRIGGER_END) {
1216         xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
1217                    addr, val);
1218         return;
1219     }
1220 
1221     /*
1222      * Forward the source event notification directly to the Router.
1223      * The source interrupt number should already be correctly encoded
1224      * with the chip block id by the sending device (PHB, PSI).
1225      */
1226     blk = XIVE_EAS_BLOCK(val);
1227     idx = XIVE_EAS_INDEX(val);
1228 
1229     xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
1230 }
1231 
1232 static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
1233                                      unsigned size)
1234 {
1235     PnvXive *xive = PNV_XIVE(opaque);
1236 
1237     /* VC: HW triggers */
1238     switch (addr) {
1239     case 0x000 ... 0x7FF:
1240         pnv_xive_ic_hw_trigger(opaque, addr, val);
1241         break;
1242 
1243     /* VC: Forwarded IRQs */
1244     case PNV_XIVE_FORWARD_IPI:
1245     case PNV_XIVE_FORWARD_HW:
1246     case PNV_XIVE_FORWARD_OS_ESC:
1247     case PNV_XIVE_FORWARD_HW_ESC:
1248     case PNV_XIVE_FORWARD_REDIS:
1249         /* TODO: forwarded IRQs. Should be like HW triggers */
1250         xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
1251                    addr, val);
1252         break;
1253 
1254     /* VC syncs */
1255     case PNV_XIVE_SYNC_IPI:
1256     case PNV_XIVE_SYNC_HW:
1257     case PNV_XIVE_SYNC_OS_ESC:
1258     case PNV_XIVE_SYNC_HW_ESC:
1259     case PNV_XIVE_SYNC_REDIS:
1260         break;
1261 
1262     /* PC syncs */
1263     case PNV_XIVE_SYNC_PULL:
1264     case PNV_XIVE_SYNC_PUSH:
1265     case PNV_XIVE_SYNC_VPC:
1266         break;
1267 
1268     default:
1269         xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
1270     }
1271 }
1272 
1273 static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
1274                                         unsigned size)
1275 {
1276     PnvXive *xive = PNV_XIVE(opaque);
1277 
1278     /* loads are invalid */
1279     xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
1280     return -1;
1281 }
1282 
/* IC notify page: 8-byte big-endian accesses only */
static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1296 
1297 /*
1298  * IC - LSI MMIO handlers (not modeled)
1299  */
1300 
1301 static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
1302                               uint64_t val, unsigned size)
1303 {
1304     PnvXive *xive = PNV_XIVE(opaque);
1305 
1306     xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
1307 }
1308 
1309 static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
1310 {
1311     PnvXive *xive = PNV_XIVE(opaque);
1312 
1313     xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
1314     return -1;
1315 }
1316 
/* IC LSI trigger/EOI pages: 8-byte big-endian accesses only */
static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1330 
1331 /*
1332  * IC - Indirect TIMA MMIO handlers
1333  */
1334 
1335 /*
1336  * When the TIMA is accessed from the indirect page, the thread id
1337  * (PIR) has to be configured in the IC registers before. This is used
1338  * for resets and for debug purpose also.
1339  */
1340 static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
1341 {
1342     uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
1343     PowerPCCPU *cpu = NULL;
1344     int pir;
1345 
1346     if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
1347         xive_error(xive, "IC: no indirect TIMA access in progress");
1348         return NULL;
1349     }
1350 
1351     pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff;
1352     cpu = ppc_get_vcpu_by_pir(pir);
1353     if (!cpu) {
1354         xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
1355         return NULL;
1356     }
1357 
1358     /* Check that HW thread is XIVE enabled */
1359     if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
1360         xive_error(xive, "IC: CPU %x is not enabled", pir);
1361     }
1362 
1363     return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
1364 }
1365 
1366 static void xive_tm_indirect_write(void *opaque, hwaddr offset,
1367                                    uint64_t value, unsigned size)
1368 {
1369     XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1370 
1371     xive_tctx_tm_write(tctx, offset, value, size);
1372 }
1373 
1374 static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
1375                                       unsigned size)
1376 {
1377     XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1378 
1379     return xive_tctx_tm_read(tctx, offset, size);
1380 }
1381 
/* Indirect TIMA: 1 to 8 byte accesses, as for the direct TIMA page */
static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
1395 
1396 /*
1397  * Interrupt controller XSCOM region.
1398  */
1399 static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
1400 {
1401     switch (addr >> 3) {
1402     case X_VC_EQC_CONFIG:
1403         /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
1404         return VC_EQC_SYNC_MASK;
1405     default:
1406         return pnv_xive_ic_reg_read(opaque, addr, size);
1407     }
1408 }
1409 
/* XSCOM stores are simply forwarded to the IC BAR register handler */
static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}
1415 
/* XSCOM region: 8-byte big-endian accesses only */
static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};
1429 
1430 /*
1431  * Virtualization Controller MMIO region containing the IPI and END ESB pages
1432  */
1433 static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
1434                                  unsigned size)
1435 {
1436     PnvXive *xive = PNV_XIVE(opaque);
1437     uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1438     uint64_t edt_type = 0;
1439     uint64_t edt_offset;
1440     MemTxResult result;
1441     AddressSpace *edt_as = NULL;
1442     uint64_t ret = -1;
1443 
1444     if (edt_index < XIVE_TABLE_EDT_MAX) {
1445         edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1446     }
1447 
1448     switch (edt_type) {
1449     case CQ_TDR_EDT_IPI:
1450         edt_as = &xive->ipi_as;
1451         break;
1452     case CQ_TDR_EDT_EQ:
1453         edt_as = &xive->end_as;
1454         break;
1455     default:
1456         xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
1457         return -1;
1458     }
1459 
1460     /* Remap the offset for the targeted address space */
1461     edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1462 
1463     ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
1464                             &result);
1465 
1466     if (result != MEMTX_OK) {
1467         xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
1468                    HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
1469                    offset, edt_offset);
1470         return -1;
1471     }
1472 
1473     return ret;
1474 }
1475 
1476 static void pnv_xive_vc_write(void *opaque, hwaddr offset,
1477                               uint64_t val, unsigned size)
1478 {
1479     PnvXive *xive = PNV_XIVE(opaque);
1480     uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1481     uint64_t edt_type = 0;
1482     uint64_t edt_offset;
1483     MemTxResult result;
1484     AddressSpace *edt_as = NULL;
1485 
1486     if (edt_index < XIVE_TABLE_EDT_MAX) {
1487         edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1488     }
1489 
1490     switch (edt_type) {
1491     case CQ_TDR_EDT_IPI:
1492         edt_as = &xive->ipi_as;
1493         break;
1494     case CQ_TDR_EDT_EQ:
1495         edt_as = &xive->end_as;
1496         break;
1497     default:
1498         xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
1499                    offset);
1500         return;
1501     }
1502 
1503     /* Remap the offset for the targeted address space */
1504     edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1505 
1506     address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
1507     if (result != MEMTX_OK) {
1508         xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
1509     }
1510 }
1511 
/* VC region: 8-byte big-endian accesses only */
static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1525 
1526 /*
1527  * Presenter Controller MMIO region. The Virtualization Controller
1528  * updates the IPB in the NVT table when required. Not modeled.
1529  */
1530 static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
1531                                  unsigned size)
1532 {
1533     PnvXive *xive = PNV_XIVE(opaque);
1534 
1535     xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
1536     return -1;
1537 }
1538 
1539 static void pnv_xive_pc_write(void *opaque, hwaddr addr,
1540                               uint64_t value, unsigned size)
1541 {
1542     PnvXive *xive = PNV_XIVE(opaque);
1543 
1544     xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
1545 }
1546 
/* PC region: 8-byte big-endian accesses only */
static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1560 
1561 void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
1562 {
1563     XiveRouter *xrtr = XIVE_ROUTER(xive);
1564     uint8_t blk = xive->chip->chip_id;
1565     uint32_t srcno0 = XIVE_EAS(blk, 0);
1566     uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
1567     XiveEAS eas;
1568     XiveEND end;
1569     int i;
1570 
1571     monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
1572                    srcno0 + nr_ipis - 1);
1573     xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);
1574 
1575     monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
1576                    srcno0 + nr_ipis - 1);
1577     for (i = 0; i < nr_ipis; i++) {
1578         if (xive_router_get_eas(xrtr, blk, i, &eas)) {
1579             break;
1580         }
1581         if (!xive_eas_is_masked(&eas)) {
1582             xive_eas_pic_print_info(&eas, i, mon);
1583         }
1584     }
1585 
1586     monitor_printf(mon, "XIVE[%x] ENDT\n", blk);
1587     i = 0;
1588     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1589         xive_end_pic_print_info(&end, i++, mon);
1590     }
1591 
1592     monitor_printf(mon, "XIVE[%x] END Escalation EAT\n", blk);
1593     i = 0;
1594     while (!xive_router_get_end(xrtr, blk, i, &end)) {
1595         xive_end_eas_pic_print_info(&end, i++, mon);
1596     }
1597 }
1598 
/*
 * Machine reset handler. Restores the default configuration and
 * unmaps the dynamically configured MMIO subregions so that a
 * subsequent FW configuration starts from a clean state.
 */
static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /*
     * Use the PnvChip id to identify the XIVE interrupt controller.
     * It can be overriden by configuration at runtime.
     */
    xive->tctx_chipid = xive->chip->chip_id;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /*
     * Clear subregions. The child ESB regions are removed from their
     * EDT window before the window itself is removed from its parent.
     */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}
1631 
/*
 * Instance init: create the embedded IPI and END source child objects.
 * They are configured and realized later in pnv_xive_realize().
 */
static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}
1643 
1644 /*
1645  *  Maximum number of IRQs and ENDs supported by HW
1646  */
1647 #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1648 #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1649 
/*
 * Device realize: realizes the IPI/END source children at maximum HW
 * capacity and creates all the MMIO regions and address spaces of the
 * controller. The regions are mapped later, when the FW configures the
 * BARs through XSCOM.
 */
static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    /* The "chip" link property is mandatory */
    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /*
     * XSCOM region, used for initial configuration of the BARs.
     * The << 3 maps each 8-byte register onto one XSCOM address.
     */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}
1743 
1744 static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
1745                              int xscom_offset)
1746 {
1747     const char compat[] = "ibm,power9-xive-x";
1748     char *name;
1749     int offset;
1750     uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
1751     uint32_t reg[] = {
1752         cpu_to_be32(lpc_pcba),
1753         cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
1754     };
1755 
1756     name = g_strdup_printf("xive@%x", lpc_pcba);
1757     offset = fdt_add_subnode(fdt, xscom_offset, name);
1758     _FDT(offset);
1759     g_free(name);
1760 
1761     _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
1762     _FDT((fdt_setprop(fdt, offset, "compatible", compat,
1763                       sizeof(compat))));
1764     return 0;
1765 }
1766 
static Property pnv_xive_properties[] = {
    /* Base addresses of the IC/VC/PC/TM MMIO regions */
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};
1776 
1777 static void pnv_xive_class_init(ObjectClass *klass, void *data)
1778 {
1779     DeviceClass *dc = DEVICE_CLASS(klass);
1780     PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
1781     XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
1782     XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
1783 
1784     xdc->dt_xscom = pnv_xive_dt_xscom;
1785 
1786     dc->desc = "PowerNV XIVE Interrupt Controller";
1787     dc->realize = pnv_xive_realize;
1788     dc->props = pnv_xive_properties;
1789 
1790     xrc->get_eas = pnv_xive_get_eas;
1791     xrc->get_end = pnv_xive_get_end;
1792     xrc->write_end = pnv_xive_write_end;
1793     xrc->get_nvt = pnv_xive_get_nvt;
1794     xrc->write_nvt = pnv_xive_write_nvt;
1795     xrc->get_tctx = pnv_xive_get_tctx;
1796 
1797     xnc->notify = pnv_xive_notify;
1798 };
1799 
/* QOM type description: a XiveRouter implementing the XSCOM interface */
static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};
1811 
/* Register the PnvXive type with QOM at module load time */
static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)
1818